From 8b5063262d1e85bcef17909fd76d02a67430aec4 Mon Sep 17 00:00:00 2001
From: gongsiyi150 <623965126@qq.com>
Date: Tue, 30 Jun 2020 00:11:05 +0800
Subject: [PATCH] add English documents.

---
 content/en/docs.lnk | Bin 0 -> 890 bytes .../Administratorguide/Administratorguide.md | 3 + .../en/docs/Administratorguide/audit-logs.md | 6 + .../backup-and-restoration.md | 9 + .../docs/Administratorguide/check-method-0.md | 350 + .../docs/Administratorguide/check-method-2.md | 40 + .../docs/Administratorguide/check-method.md | 60 + .../checking-and-deleting-logs.md | 11 + .../checking-database-performance.md | 7 + .../checking-opengauss-health-status.md | 7 + .../checking-opengauss-run-logs.md | 111 + .../Administratorguide/checking-os-logs.md | 14 + .../checking-os-parameters.md | 7 + ...g-the-number-of-application-connections.md | 126 + .../checking-time-consistency.md | 46 + .../Administratorguide/cleaning-run-logs.md | 25 + .../data-security-maintenance-suggestions.md | 23 + .../exception-handling-1.md | 370 + .../exception-handling-3.md | 124 + .../Administratorguide/exception-handling.md | 106 + .../generating-configuration-files.md | 61 + .../docs/Administratorguide/gs_basebackup.md | 125 + content/en/docs/Administratorguide/gs_dump.md | 561 ++ .../en/docs/Administratorguide/gs_dumpall.md | 253 + .../en/docs/Administratorguide/gs_restore.md | 387 + .../docs/Administratorguide/log-overview.md | 55 + .../docs/Administratorguide/log-reference.md | 15 + .../logical-backup-and-restoration.md | 9 + .../docs/Administratorguide/operation-logs.md | 30 + .../en/docs/Administratorguide/overview.md | 103 + .../Administratorguide/performance-logs.md | 22 + .../physical-backup-and-restoration.md | 5 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes .../Administratorguide/querying-status.md | 106 + .../Administratorguide/risky-operations.md | 98 + .../routine-maintenance-check-items.md | 170 + .../Administratorguide/routine-maintenance.md | 23 + .../routinely-maintaining-tables.md | 106 + .../routinely-recreating-an-index.md | 64 + .../starting-and-stopping-opengauss.md | 62 + .../en/docs/Administratorguide/system-logs.md | 25 + content/en/docs/Administratorguide/wals.md | 29 + .../en/docs/Compilationguide/Compilation.md | 12 + .../compiling-open-source-software.md | 76 + .../compiling-the-installation-package.md | 47 + .../Compilationguide/compiling-the-version.md | 15 + .../configuring-environment-variables.md | 6 + .../docs/Compilationguide/downloading-code.md | 25 + content/en/docs/Compilationguide/faqs.md | 11 + .../figures/\347\273\230\345\233\2761.png" | Bin 0 -> 17768 bytes .../Compilationguide/hardware-requirements.md | 11 + ...rary-files-generated-during-compilation.md | 23 + ...signal-terminated-program-cclplus-error.md | 12 + ...-bytes-after-a-total-of-xxx-bytes-error.md | 12 + ...ompiler-cannot-create-executables-error.md | 12 + .../introduction-to-build-sh.md | 75 + .../en/docs/Compilationguide/introduction.md | 7 + .../docs/Compilationguide/os-requirements.md | 7 + content/en/docs/Compilationguide/overview.md | 4 + .../preparation-before-compiling.md | 9 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580
bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes content/en/docs/Compilationguide/purpose.md | 4 + .../setting-up-the-compilation-environment.md | 9 + .../software-compilation-and-installation.md | 120 + .../software-dependency-requirements.md | 53 + .../Compilationguide/software-requirements.md | 7 + content/en/docs/Description/Description.md | 4 + .../docs/Description/application-scenarios.md | 11 + content/en/docs/Description/basic-features.md | 33 + .../en/docs/Description/data-partitioning.md | 48 + .../en/docs/Description/enhanced-features.md | 17 + .../figures/opengauss-logical-components.png | Bin 0 -> 30495 bytes ...7\345\255\230\345\274\225\346\223\216.png" | Bin 0 -> 96516 bytes ...\241\214\345\274\225\346\223\216(png).png" | Bin 0 -> 12657 bytes .../Description/ha-transaction-processing.md | 15 + .../high-concurrency-and-high-performance.md | 4 + content/en/docs/Description/memory-table.md | 4 + .../docs/Description/operating-environment.md | 15 + .../en/docs/Description/primary-standby.md | 6 + .../docs/Description/product-positioning.md | 8 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes .../en/docs/Description/sql-self-diagnosis.md | 8 + .../docs/Description/system-architecture.md | 46 + .../Description/technical-specifications.md | 81 + ...or-and-hybrid-row-column-storage-engine.md | 52 + .../en/docs/Developerguide/Developerguide.md | 79 + content/en/docs/Developerguide/abort.md | 70 + .../Developerguide/additional-features.md | 11 + .../en/docs/Developerguide/administration.md | 41 + .../en/docs/Developerguide/administrators.md | 44 + .../Developerguide/advisory-lock-functions.md | 171 + .../Developerguide/aggregate-functions.md | 673 ++ content/en/docs/Developerguide/ai-features.md | 9 + .../en/docs/Developerguide/alarm-detection.md | 47 + .../docs/Developerguide/alter-data-source.md | 103 + .../en/docs/Developerguide/alter-database.md | 142 + .../alter-default-privileges.md | 141 + .../en/docs/Developerguide/alter-directory.md | 41 + .../en/docs/Developerguide/alter-function.md | 190 + content/en/docs/Developerguide/alter-group.md | 56 + content/en/docs/Developerguide/alter-index.md | 165 + .../docs/Developerguide/alter-large-object.md | 36 + content/en/docs/Developerguide/alter-role.md | 118 + .../alter-row-level-security-policy.md | 107 + .../en/docs/Developerguide/alter-schema.md | 76 + .../en/docs/Developerguide/alter-sequence.md | 76 + .../en/docs/Developerguide/alter-session.md | 85 + .../en/docs/Developerguide/alter-synonym.md | 58 + .../alter-system-kill-session.md | 50 + .../Developerguide/alter-table-partition.md | 244 + content/en/docs/Developerguide/alter-table.md | 431 ++ .../docs/Developerguide/alter-tablespace.md | 119 + .../alter-text-search-configuration.md | 175 + .../alter-text-search-dictionary.md | 101 + .../en/docs/Developerguide/alter-trigger.md | 45 + content/en/docs/Developerguide/alter-type.md | 152 + content/en/docs/Developerguide/alter-user.md | 111 + content/en/docs/Developerguide/alter-view.md | 130 + 
.../en/docs/Developerguide/analyze-analyse.md | 192 + .../en/docs/Developerguide/analyze-table.md | 40 + .../analyzing-hardware-bottlenecks.md | 13 + .../docs/Developerguide/anonymous-blocks.md | 21 + .../en/docs/Developerguide/api-reference.md | 9 + content/en/docs/Developerguide/apis.md | 36 + content/en/docs/Developerguide/appendix.md | 9 + .../application-development-guide.md | 13 + content/en/docs/Developerguide/archiving.md | 50 + .../docs/Developerguide/array-expressions.md | 94 + .../array-functions-and-operators.md | 386 + .../docs/Developerguide/arrays-and-records.md | 7 + content/en/docs/Developerguide/arrays.md | 29 + .../Developerguide/assignment-statements.md | 26 + .../asynchronous-i-o-operations.md | 111 + .../en/docs/Developerguide/audit-switch.md | 107 + content/en/docs/Developerguide/auditing.md | 9 + .../Developerguide/automatic-vacuuming.md | 192 + .../docs/Developerguide/background-writer.md | 49 + ...ackup-and-restoration-control-functions.md | 205 + .../docs/Developerguide/basic-statements.md | 14 + .../en/docs/Developerguide/basic-structure.md | 47 + .../Developerguide/basic-text-matching.md | 53 + .../docs/Developerguide/before-you-start.md | 146 + content/en/docs/Developerguide/begin.md | 58 + .../benchmarksql-an-open-source-tpc-c-tool.md | 14 + .../en/docs/Developerguide/best-practices.md | 79 + .../en/docs/Developerguide/bgwriter_stat.md | 95 + .../docs/Developerguide/binary-data-types.md | 74 + .../binary-string-functions-and-operators.md | 172 + content/en/docs/Developerguide/bios-16.md | 17 + content/en/docs/Developerguide/bios.md | 2 + .../bit-string-functions-and-operators.md | 144 + .../docs/Developerguide/bit-string-types.md | 44 + .../docs/Developerguide/boolean-data-types.md | 72 + .../docs/Developerguide/branch-statements.md | 56 + content/en/docs/Developerguide/cache-io.md | 61 + .../en/docs/Developerguide/call-statement.md | 38 + content/en/docs/Developerguide/call.md | 79 + .../Developerguide/character-data-types.md | 152 + ...cter-processing-functions-and-operators.md | 1506 ++++ .../checking-blocked-statements.md | 70 + ...king-the-number-of-database-connections.md | 133 + .../en/docs/Developerguide/checkpoint-22.md | 30 + .../en/docs/Developerguide/checkpoint-32.md | 30 + content/en/docs/Developerguide/checkpoint.md | 30 + .../en/docs/Developerguide/checkpoints-41.md | 111 + content/en/docs/Developerguide/checkpoints.md | 111 + .../docs/Developerguide/class_vital_info.md | 47 + .../client-access-authentication.md | 15 + content/en/docs/Developerguide/close.md | 40 + .../Developerguide/closing-a-connection.md | 6 + content/en/docs/Developerguide/cluster.md | 124 + .../Developerguide/command-reference-13.md | 44 + .../docs/Developerguide/command-reference.md | 100 + content/en/docs/Developerguide/comment.md | 148 + .../en/docs/Developerguide/commissioning.md | 160 + content/en/docs/Developerguide/commit-end.md | 67 + .../en/docs/Developerguide/commit-prepared.md | 42 + .../common-faults-and-identification.md | 9 + .../communication-library-parameters.md | 215 + .../Developerguide/comparison-disk-vs-mot.md | 115 + .../Developerguide/comparison-operators.md | 52 + .../compatibility-with-earlier-versions.md | 127 + .../Developerguide/competitive-overview.md | 262 + content/en/docs/Developerguide/concepts.md | 22 + .../concurrency-control-mechanism.md | 48 + .../concurrent-data-import-and-queries.md | 33 + ...ent-insert-and-delete-in-the-same-table.md | 33 + .../concurrent-insert-in-the-same-table.md | 33 + .../concurrent-update-in-the-same-table.md | 
34 + .../concurrent-write-examples.md | 17 + .../Developerguide/condition-expressions.md | 214 + .../conditional-expression-functions.md | 171 + .../Developerguide/conditional-statements.md | 102 + .../en/docs/Developerguide/config_settings.md | 130 + .../Developerguide/configuration-examples.md | 111 + .../configuration-file-reference.md | 133 + .../configuration-settings-functions.md | 39 + .../en/docs/Developerguide/configuration.md | 7 + .../en/docs/Developerguide/configurations.md | 15 + ...nfiguring-a-data-source-in-the-linux-os.md | 446 ++ .../configuring-a-remote-connection.md | 41 + ...onfiguring-client-access-authentication.md | 115 + .../configuring-database-audit.md | 11 + ...uring-file-permission-security-policies.md | 176 + .../docs/Developerguide/configuring-llvm.md | 13 + .../configuring-running-parameters.md | 7 + .../confirming-connection-information.md | 48 + .../connecting-to-a-database-0.md | 140 + .../connecting-to-a-database.md | 16 + .../connecting-to-the-database-(using-ssl).md | 136 + .../connection-and-authentication.md | 9 + .../Developerguide/connection-characters.md | 88 + .../connection-pool-parameters.md | 37 + .../Developerguide/connection-settings.md | 143 + .../docs/Developerguide/constant-and-macro.md | 113 + .../constraints-on-index-use.md | 37 + .../docs/Developerguide/control-statements.md | 17 + .../Developerguide/controlling-text-search.md | 11 + .../controlling-transactions.md | 23 + .../docs/Developerguide/conversion-example.md | 68 + ...onverting-a-disk-table-into-a-mot-table.md | 13 + content/en/docs/Developerguide/converting.md | 14 + content/en/docs/Developerguide/copy.md | 615 ++ content/en/docs/Developerguide/copymanager.md | 109 + ...core-dump-occurs-due-to-full-disk-space.md | 16 + ...settings-of-guc-parameter-log_directory.md | 14 + .../Developerguide/core-fault-locating.md | 7 + .../Developerguide/cost-based-vacuum-delay.md | 69 + content/en/docs/Developerguide/cpu.md | 117 + .../docs/Developerguide/create-data-source.md | 92 + .../en/docs/Developerguide/create-database.md | 170 + .../docs/Developerguide/create-directory.md | 55 + .../en/docs/Developerguide/create-function.md | 296 + .../en/docs/Developerguide/create-group.md | 56 + .../en/docs/Developerguide/create-index.md | 314 + .../docs/Developerguide/create-procedure.md | 95 + content/en/docs/Developerguide/create-role.md | 236 + .../create-row-level-security-policy.md | 224 + .../en/docs/Developerguide/create-schema.md | 81 + .../en/docs/Developerguide/create-sequence.md | 140 + .../en/docs/Developerguide/create-synonym.md | 107 + .../en/docs/Developerguide/create-table-as.md | 129 + .../Developerguide/create-table-partition.md | 637 ++ .../en/docs/Developerguide/create-table.md | 925 +++ .../docs/Developerguide/create-tablespace.md | 131 + .../create-text-search-configuration.md | 123 + .../create-text-search-dictionary.md | 143 + .../en/docs/Developerguide/create-trigger.md | 352 + content/en/docs/Developerguide/create-type.md | 274 + content/en/docs/Developerguide/create-user.md | 114 + content/en/docs/Developerguide/create-view.md | 74 + .../Developerguide/creating-an-index-30.md | 59 + .../docs/Developerguide/creating-an-index.md | 59 + .../creating-and-managing-databases.md | 99 + .../creating-and-managing-indexes.md | 238 + ...reating-and-managing-partitioned-tables.md | 200 + .../creating-and-managing-schemas.md | 155 + .../creating-and-managing-sequences.md | 78 + .../creating-and-managing-tables.md | 13 + .../creating-and-managing-tablespaces.md | 175 + 
.../creating-and-managing-views.md | 55 + .../creating-dropping-a-mot-table.md | 22 + .../en/docs/Developerguide/creating-tables.md | 28 + content/en/docs/Developerguide/cursor-loop.md | 18 + .../docs/Developerguide/cursor-operations.md | 8 + content/en/docs/Developerguide/cursor.md | 65 + content/en/docs/Developerguide/cursors.md | 11 + ...-by-a-user-without-required-permissions.md | 125 + .../data-import-using-copy-from-stdin.md | 9 + .../Developerguide/data-type-conversion.md | 156 + .../en/docs/Developerguide/data-types-22.md | 4 + .../en/docs/Developerguide/data-types-36.md | 4 + ...-types-supported-by-column-store-tables.md | 214 + content/en/docs/Developerguide/data-types.md | 35 + .../database-connection-control-functions.md | 23 + .../database-logical-architecture.md | 14 + .../database-object-functions.md | 454 ++ .../database-security-management.md | 9 + .../database-statement-execution-functions.md | 29 + ...time-processing-functions-and-operators.md | 1336 ++++ .../en/docs/Developerguide/date-time-types.md | 616 ++ .../en/docs/Developerguide/dbe_perf-schema.md | 41 + .../Developerguide/dcl-syntax-overview.md | 78 + .../Developerguide/ddl-syntax-overview.md | 327 + content/en/docs/Developerguide/deallocate.md | 33 + content/en/docs/Developerguide/debugging.md | 122 + .../en/docs/Developerguide/declare-syntax.md | 9 + content/en/docs/Developerguide/declare.md | 91 + content/en/docs/Developerguide/deep-copy.md | 13 + .../docs/Developerguide/default-mot-conf.md | 17 + .../default-permission-mechanism.md | 14 + .../default-settings-of-client-connection.md | 9 + .../en/docs/Developerguide/define-variable.md | 56 + content/en/docs/Developerguide/delete.md | 106 + .../deleting-data-from-a-table.md | 32 + content/en/docs/Developerguide/deployment.md | 11 + content/en/docs/Developerguide/description.md | 99 + .../docs/Developerguide/design-principles.md | 12 + content/en/docs/Developerguide/design.md | 18 + ...rmining-the-scope-of-performance-tuning.md | 46 + .../docs/Developerguide/developer-options.md | 662 ++ .../development-based-on-jdbc.md | 31 + .../development-based-on-libpq.md | 6 + .../development-based-on-odbc.md | 61 + .../Developerguide/development-process-1.md | 104 + .../Developerguide/development-process.md | 5 + .../development-specifications.md | 9 + .../en/docs/Developerguide/dictionaries.md | 17 + content/en/docs/Developerguide/disk-space.md | 29 + content/en/docs/Developerguide/disk-ssd.md | 15 + .../Developerguide/dml-syntax-overview.md | 60 + content/en/docs/Developerguide/do.md | 51 + .../Developerguide/doing-vacuum-to-a-table.md | 17 + .../docs/Developerguide/drop-data-source.md | 51 + .../en/docs/Developerguide/drop-database.md | 51 + .../en/docs/Developerguide/drop-directory.md | 39 + .../en/docs/Developerguide/drop-function.md | 50 + content/en/docs/Developerguide/drop-group.md | 26 + content/en/docs/Developerguide/drop-index.md | 50 + content/en/docs/Developerguide/drop-owned.md | 34 + .../en/docs/Developerguide/drop-procedure.md | 33 + content/en/docs/Developerguide/drop-role.md | 37 + .../drop-row-level-security-policy.md | 53 + content/en/docs/Developerguide/drop-schema.md | 47 + .../en/docs/Developerguide/drop-sequence.md | 49 + .../en/docs/Developerguide/drop-synonym.md | 39 + content/en/docs/Developerguide/drop-table.md | 44 + .../en/docs/Developerguide/drop-tablespace.md | 48 + .../drop-text-search-configuration.md | 43 + .../drop-text-search-dictionary.md | 52 + .../en/docs/Developerguide/drop-trigger.md | 47 + 
content/en/docs/Developerguide/drop-type.md | 39 + content/en/docs/Developerguide/drop-user.md | 53 + content/en/docs/Developerguide/drop-view.md | 41 + .../en/docs/Developerguide/durability-20.md | 19 + content/en/docs/Developerguide/durability.md | 8 + .../docs/Developerguide/dynamic-statements.md | 11 + .../dynamically-calling-anonymous-blocks.md | 23 + .../dynamically-calling-stored-procedures.md | 22 + .../Developerguide/environment-deployment.md | 231 + content/en/docs/Developerguide/error-log.md | 29 + .../error-reporting-and-logging.md | 11 + .../error-trapping-statements.md | 112 + .../errors-returned-to-the-user.md | 341 + .../errors-written-the-log-file.md | 76 + ...e-tcp-ip-connections-in-ssh-tunnel-mode.md | 30 + ...g-secure-tcp-ip-connections-in-ssl-mode.md | 543 ++ ...-and-exporting-data-through-local-files.md | 111 + ...ng-data-from-a-my-database-to-opengauss.md | 91 + ...ysql-database-to-the-opengauss-database.md | 91 + content/en/docs/Developerguide/example-3.md | 446 ++ .../example-common-operations.md | 218 + ...-and-exporting-data-through-local-files.md | 113 + ...e-retrying-sql-queries-for-applications.md | 199 + content/en/docs/Developerguide/example.md | 339 + content/en/docs/Developerguide/examples.md | 339 + .../docs/Developerguide/exception-handling.md | 59 + content/en/docs/Developerguide/execute.md | 54 + .../executing-dynamic-non-query-statements.md | 56 + .../executing-dynamic-query-statements.md | 50 + .../experience-in-rewriting-sql-statements.md | 57 + .../en/docs/Developerguide/explain-plan.md | 79 + content/en/docs/Developerguide/explain.md | 247 + .../en/docs/Developerguide/explicit-cursor.md | 81 + .../Developerguide/exporting-a-database.md | 129 + .../docs/Developerguide/exporting-a-schema.md | 144 + .../exporting-a-single-database.md | 9 + .../docs/Developerguide/exporting-a-table.md | 168 + .../exporting-all-databases-6.md | 95 + .../Developerguide/exporting-all-databases.md | 7 + .../en/docs/Developerguide/exporting-data.md | 5 + .../exporting-global-objects.md | 92 + content/en/docs/Developerguide/expressions.md | 13 + ...extended-fdw-and-other-gaussdb-features.md | 48 + .../docs/Developerguide/extended-functions.md | 41 + .../en/docs/Developerguide/extended-syntax.md | 298 + .../external-support-tools-orange.md | 13 + content/en/docs/Developerguide/faqs.md | 29 + .../en/docs/Developerguide/fault-tolerance.md | 99 + .../Developerguide/features-and-benefits.md | 16 + content/en/docs/Developerguide/fetch.md | 214 + ...\350\260\203\346\225\2641\357\274\211.png" | Bin 0 -> 76589 bytes ...\350\260\203\346\225\2642\357\274\211.png" | Bin 0 -> 83041 bytes ...47\244\272\344\276\2133-1\357\274\211.png" | Bin 0 -> 64246 bytes ...\347\244\272\344\276\2133\357\274\211.png" | Bin 0 -> 65673 bytes .../en/docs/Developerguide/figures/all.png | Bin 0 -> 2167 bytes .../figures/anonymous_block.png | Bin 0 -> 3905 bytes .../docs/Developerguide/figures/any-some.png | Bin 0 -> 3304 bytes ...tion-development-process-based-on-jdbc.png | Bin 0 -> 32979 bytes .../figures/assignment_value.png | Bin 0 -> 1525 bytes .../figures/call_anonymous_block.png | Bin 0 -> 8713 bytes .../Developerguide/figures/call_clause.png | Bin 0 -> 3274 bytes .../Developerguide/figures/call_procedure.png | Bin 0 -> 5805 bytes .../en/docs/Developerguide/figures/case.jpg | Bin 0 -> 11302 bytes .../docs/Developerguide/figures/case_when.png | Bin 0 -> 7223 bytes .../Developerguide/figures/close_cursor.jpg | Bin 0 -> 3457 bytes .../docs/Developerguide/figures/coalesce.png | Bin 0 -> 2148 bytes 
.../figures/cursor_typename.png | Bin 0 -> 2365 bytes .../figures/database-logical-architecture.png | Bin 0 -> 29796 bytes .../figures/declare_variable.png | Bin 0 -> 2823 bytes .../en/docs/Developerguide/figures/decode.png | Bin 0 -> 3098 bytes .../figures/dynamic_cursor_define.png | Bin 0 -> 1502 bytes .../figures/en-us_image_0242381460.png | Bin 0 -> 2177 bytes .../figures/en-us_image_0242381461.png | Bin 0 -> 2000 bytes .../figures/en-us_image_0242381462.png | Bin 0 -> 1164 bytes .../figures/en-us_image_0242381463.png | Bin 0 -> 837 bytes .../figures/en-us_image_0242381464.png | Bin 0 -> 1969 bytes .../figures/en-us_image_0242381725.png | Bin 0 -> 308881 bytes .../figures/en-us_image_0243595915.png | Bin 0 -> 51964 bytes .../figures/en-us_image_0244851037.png | Bin 0 -> 53601 bytes .../figures/en-us_image_0246254080.png | Bin 0 -> 38952 bytes .../figures/en-us_image_0246254081.png | Bin 0 -> 35366 bytes .../figures/en-us_image_0246254082.png | Bin 0 -> 27239 bytes .../figures/en-us_image_0252660975.png | Bin 0 -> 52545 bytes .../figures/en-us_image_0252663634.png | Bin 0 -> 15359 bytes .../figures/en-us_image_0253028833.png | Bin 0 -> 77804 bytes .../figures/en-us_image_0253030479.png | Bin 0 -> 77957 bytes .../figures/en-us_image_0253032870.png | Bin 0 -> 79715 bytes .../figures/en-us_image_0253036670.png | Bin 0 -> 77939 bytes .../figures/en-us_image_0253037239.png | Bin 0 -> 75433 bytes .../figures/en-us_image_0253038757.png | Bin 0 -> 15090 bytes .../figures/en-us_image_0253082069.png | Bin 0 -> 25606 bytes .../figures/en-us_image_0253403489.png | Bin 0 -> 2587 bytes .../figures/en-us_image_0253403490.png | Bin 0 -> 2406 bytes .../figures/en-us_image_0253404022.png | Bin 0 -> 2002 bytes .../figures/en-us_image_0253404023.png | Bin 0 -> 1908 bytes .../figures/en-us_image_0257713415.png | Bin 0 -> 233727 bytes .../figures/en-us_image_0257713417.png | Bin 0 -> 313839 bytes .../figures/en-us_image_0257713419.png | Bin 0 -> 222950 bytes .../figures/en-us_image_0257713431.png | Bin 0 -> 46962 bytes .../figures/en-us_image_0257713433.png | Bin 0 -> 128445 bytes .../figures/en-us_image_0257713435.png | Bin 0 -> 45403 bytes .../figures/en-us_image_0257713439.png | Bin 0 -> 67525 bytes .../figures/en-us_image_0257713448.png | Bin 0 -> 5544 bytes .../figures/en-us_image_0257713450.png | Bin 0 -> 1854 bytes .../figures/en-us_image_0257713454.png | Bin 0 -> 6603 bytes .../figures/en-us_image_0257713456.png | Bin 0 -> 109259 bytes .../figures/en-us_image_0257806512.png | Bin 0 -> 54542 bytes .../figures/en-us_image_0257806513.png | Bin 0 -> 51429 bytes .../figures/en-us_image_0257839664.png | Bin 0 -> 37578 bytes .../figures/en-us_image_0257843947.png | Bin 0 -> 1854 bytes .../figures/en-us_image_0257843950.jpg | Bin 0 -> 187934 bytes .../figures/en-us_image_0257854512.png | Bin 0 -> 2000 bytes .../figures/en-us_image_0257854550.png | Bin 0 -> 2177 bytes .../figures/en-us_image_0257854609.png | Bin 0 -> 1969 bytes .../figures/en-us_image_0257854718.png | Bin 0 -> 77804 bytes .../figures/en-us_image_0257854722.png | Bin 0 -> 27239 bytes .../figures/en-us_image_0257854726.png | Bin 0 -> 1164 bytes .../figures/en-us_image_0257854894.png | Bin 0 -> 35366 bytes .../figures/en-us_image_0257854911.png | Bin 0 -> 308881 bytes .../figures/en-us_image_0257854947.png | Bin 0 -> 1908 bytes .../figures/en-us_image_0257855009.png | Bin 0 -> 15090 bytes .../figures/en-us_image_0257855024.png | Bin 0 -> 15359 bytes .../figures/en-us_image_0257855073.png | Bin 0 -> 25606 bytes 
.../figures/en-us_image_0257855157.png | Bin 0 -> 52545 bytes .../figures/en-us_image_0257855235.png | Bin 0 -> 2002 bytes .../figures/en-us_image_0257855271.png | Bin 0 -> 38952 bytes .../figures/en-us_image_0257855327.png | Bin 0 -> 75433 bytes .../figures/en-us_image_0257855330.png | Bin 0 -> 77939 bytes .../figures/en-us_image_0257855378.png | Bin 0 -> 77957 bytes .../figures/en-us_image_0257855379.png | Bin 0 -> 79715 bytes .../figures/en-us_image_0257855432.png | Bin 0 -> 51964 bytes .../figures/en-us_image_0257855450.png | Bin 0 -> 53601 bytes .../figures/en-us_image_0257855460.png | Bin 0 -> 2406 bytes .../figures/en-us_image_0257855485.png | Bin 0 -> 837 bytes .../figures/en-us_image_0257855494.png | Bin 0 -> 2587 bytes .../figures/en-us_image_0257856189.png | Bin 0 -> 58879 bytes .../figures/en-us_image_0257856190.png | Bin 0 -> 28845 bytes .../figures/en-us_image_0257856191.png | Bin 0 -> 37961 bytes .../figures/en-us_image_0257856192.png | Bin 0 -> 29596 bytes .../figures/en-us_image_0257856193.png | Bin 0 -> 69756 bytes .../figures/en-us_image_0257860033.png | Bin 0 -> 77121 bytes ...xecute-immediate-dynamic_select_clause.png | Bin 0 -> 5564 bytes ...lated-sql-statements-by-the-sql-engine.png | Bin 0 -> 59509 bytes .../figures/exists-not-exists.png | Bin 0 -> 2885 bytes .../Developerguide/figures/fetch_cursor.png | Bin 0 -> 3142 bytes .../Developerguide/figures/for_as_loop.png | Bin 0 -> 3924 bytes .../docs/Developerguide/figures/for_loop.png | Bin 0 -> 5430 bytes .../Developerguide/figures/for_loop_query.png | Bin 0 -> 3913 bytes .../en/docs/Developerguide/figures/forall.png | Bin 0 -> 3745 bytes .../figures/gaussdb-system-architecture.png | Bin 0 -> 60330 bytes .../docs/Developerguide/figures/greatest.png | Bin 0 -> 2197 bytes .../docs/Developerguide/figures/if_then.jpg | Bin 0 -> 7459 bytes .../Developerguide/figures/if_then_else.jpg | Bin 0 -> 9206 bytes .../figures/if_then_elsif_else.png | Bin 0 -> 5987 bytes .../docs/Developerguide/figures/in-not-in.png | Bin 0 -> 2970 bytes .../figures/integrating-the-mot-engine.png | Bin 0 -> 43805 bytes .../en/docs/Developerguide/figures/least.png | Bin 0 -> 1983 bytes .../en/docs/Developerguide/figures/loop.png | Bin 0 -> 2180 bytes ...imized-storage-engine-within-opengauss.png | Bin 0 -> 59547 bytes .../figures/mot-architecture.png | Bin 0 -> 109259 bytes .../docs/Developerguide/figures/noselect.png | Bin 0 -> 3258 bytes .../en/docs/Developerguide/figures/nullif.png | Bin 0 -> 1693 bytes .../en/docs/Developerguide/figures/nvl.jpg | Bin 0 -> 4295 bytes ...-based-application-development-process.png | Bin 0 -> 8294 bytes .../figures/odbc-system-structure.png | Bin 0 -> 17823 bytes .../figures/open_dynamic_cursor.png | Bin 0 -> 4313 bytes .../docs/Developerguide/figures/open_for.png | Bin 0 -> 3940 bytes .../figures/open_static_cursor.png | Bin 0 -> 3070 bytes .../figures/opengauss-performance-tuning.png | Bin 0 -> 54108 bytes .../opengauss-service-response-process.jpg | Bin 0 -> 62869 bytes ...ory-(for-all-the-transactions-of-all-t.png | Bin 0 -> 45403 bytes .../en/docs/Developerguide/figures/raise.png | Bin 0 -> 927 bytes .../figures/raise_condition.png | Bin 0 -> 4070 bytes .../Developerguide/figures/raise_format.png | Bin 0 -> 5233 bytes .../Developerguide/figures/raise_option.png | Bin 0 -> 3000 bytes .../Developerguide/figures/raise_sqlstate.png | Bin 0 -> 4553 bytes .../Developerguide/figures/return_clause.jpg | Bin 0 -> 2263 bytes .../figures/sql-execution-plan-example.png | Bin 0 -> 15359 bytes 
.../figures/static_cursor_define.jpg | Bin 0 -> 11369 bytes .../figures/syntax-of-the-record-type.png | Bin 0 -> 20418 bytes .../en/docs/Developerguide/figures/url.png | Bin 0 -> 7896 bytes .../Developerguide/figures/using_clause-0.png | Bin 0 -> 2437 bytes .../Developerguide/figures/using_clause-1.png | Bin 0 -> 4831 bytes .../Developerguide/figures/using_clause-2.png | Bin 0 -> 4831 bytes .../Developerguide/figures/using_clause.png | Bin 0 -> 3702 bytes .../Developerguide/figures/when_clause.png | Bin 0 -> 2805 bytes .../Developerguide/figures/while_loop.png | Bin 0 -> 3453 bytes .../figures/zh-cn_image_0118861065.jpg | Bin 0 -> 66854 bytes .../figures/\346\226\207\346\241\243.png" | Bin 0 -> 24624 bytes .../en/docs/Developerguide/file-location.md | 63 + content/en/docs/Developerguide/file.md | 21 + content/en/docs/Developerguide/file_iostat.md | 109 + .../docs/Developerguide/file_redo_iostat.md | 67 + .../Developerguide/full-text-retrieval.md | 35 + .../docs/Developerguide/full-text-search.md | 21 + .../Developerguide/functions-and-operators.md | 55 + ...ons-for-asynchronous-command-processing.md | 26 + ...tions-for-canceling-queries-in-progress.md | 11 + content/en/docs/Developerguide/functions.md | 91 + .../docs/Developerguide/garbage-collection.md | 27 + .../gathering-document-statistics.md | 44 + .../docs/Developerguide/general-guidelines.md | 28 + .../Developerguide/generating-certificates.md | 269 + .../Developerguide/genetic-query-optimizer.md | 99 + .../geometric-functions-and-operators.md | 929 +++ content/en/docs/Developerguide/geometric.md | 179 + content/en/docs/Developerguide/gin-indexes.md | 11 + .../Developerguide/gin-tips-and-tricks.md | 21 + .../Developerguide/global_bgwriter_stat.md | 102 + .../docs/Developerguide/global_ckpt_status.md | 67 + .../Developerguide/global_config_settings.md | 137 + .../global_double_write_status.md | 95 + .../docs/Developerguide/global_file_iostat.md | 116 + .../Developerguide/global_file_redo_iostat.md | 74 + .../Developerguide/global_instance_time.md | 46 + .../en/docs/Developerguide/global_locks.md | 131 + .../global_memory_node_detail.md | 40 + .../Developerguide/global_operator_history.md | 173 + .../global_operator_history_table.md | 4 + .../Developerguide/global_operator_runtime.md | 180 + .../docs/Developerguide/global_os_runtime.md | 60 + .../docs/Developerguide/global_os_threads.md | 53 + .../global_pagewriter_status.md | 74 + .../global_record_reset_time.md | 32 + .../Developerguide/global_recovery_status.md | 81 + .../docs/Developerguide/global_redo_status.md | 179 + .../docs/Developerguide/global_rel_iostat.md | 53 + .../global_replication_slots.md | 95 + .../Developerguide/global_replication_stat.md | 131 + .../Developerguide/global_session_memory.md | 53 + .../global_session_memory_detail.md | 81 + .../Developerguide/global_session_stat.md | 60 + .../global_session_stat_activity.md | 170 + .../Developerguide/global_session_time.md | 53 + .../global_shared_memory_detail.md | 67 + .../Developerguide/global_stat_all_indexes.md | 81 + .../Developerguide/global_stat_all_tables.md | 172 + .../Developerguide/global_stat_bad_block.md | 74 + .../Developerguide/global_stat_database.md | 158 + .../global_stat_database_conflicts.md | 74 + .../docs/Developerguide/global_stat_db_cu.md | 53 + .../Developerguide/global_stat_session_cu.md | 39 + .../Developerguide/global_stat_sys_indexes.md | 81 + .../Developerguide/global_stat_sys_tables.md | 172 + .../global_stat_user_functions.md | 67 + .../global_stat_user_indexes.md | 81 + 
.../Developerguide/global_stat_user_tables.md | 172 + .../global_stat_xact_all_tables.md | 102 + .../global_stat_xact_sys_tables.md | 102 + .../global_stat_xact_user_functions.md | 67 + .../global_stat_xact_user_tables.md | 102 + .../global_statement_complex_history.md | 496 ++ .../global_statement_complex_history_table.md | 4 + .../global_statement_complex_runtime.md | 356 + .../Developerguide/global_statement_count.md | 200 + .../global_statio_all_indexes.md | 74 + .../global_statio_all_sequences.md | 60 + .../global_statio_all_tables.md | 102 + .../global_statio_sys_indexes.md | 74 + .../global_statio_sys_sequences.md | 60 + .../global_statio_sys_tables.md | 102 + .../global_statio_user_indexes.md | 74 + .../global_statio_user_sequences.md | 60 + .../global_statio_user_tables.md | 102 + .../global_thread_wait_status.md | 106 + .../global_threadpool_status.md | 4 + .../global_transactions_prepared_xacts.md | 53 + .../docs/Developerguide/global_wait_events.md | 81 + .../global_workload_transaction.md | 116 + .../en/docs/Developerguide/goto-statements.md | 128 + content/en/docs/Developerguide/grant.md | 452 ++ .../granting-user-permissions.md | 19 + .../en/docs/Developerguide/gs_basebackup.md | 6 + .../gs_ctl-(full-and-incremental).md | 10 + content/en/docs/Developerguide/gs_dump.md | 4 + .../en/docs/Developerguide/gs_file_stat.md | 109 + .../docs/Developerguide/gs_instance_time.md | 50 + .../en/docs/Developerguide/gs_opt_model.md | 139 + .../en/docs/Developerguide/gs_os_run_info.md | 53 + .../en/docs/Developerguide/gs_redo_stat.md | 67 + content/en/docs/Developerguide/gs_restore.md | 4 + .../gs_session_cpu_statistics.md | 88 + .../docs/Developerguide/gs_session_memory.md | 46 + .../gs_session_memory_detail.md | 78 + .../gs_session_memory_statistics.md | 91 + .../en/docs/Developerguide/gs_session_stat.md | 53 + .../en/docs/Developerguide/gs_session_time.md | 46 + .../en/docs/Developerguide/gs_sql_count.md | 204 + .../en/docs/Developerguide/gs_stat_db_cu.md | 53 + .../docs/Developerguide/gs_stat_session_cu.md | 46 + .../Developerguide/gs_thread_memory_detail.md | 83 + .../Developerguide/gs_total_memory_detail.md | 40 + .../Developerguide/gs_wlm_instance_history.md | 127 + .../Developerguide/gs_wlm_operator_history.md | 6 + .../Developerguide/gs_wlm_operator_info.md | 173 + .../gs_wlm_operator_statistics.md | 180 + .../gs_wlm_plan_encoding_table.md | 67 + .../gs_wlm_plan_operator_history.md | 6 + .../gs_wlm_plan_operator_info.md | 137 + .../gs_wlm_rebuild_user_resource_pool.md | 4 + .../Developerguide/gs_wlm_resource_pool.md | 88 + .../Developerguide/gs_wlm_session_history.md | 496 ++ .../Developerguide/gs_wlm_session_info_all.md | 4 + .../gs_wlm_session_query_info_all.md | 517 ++ .../gs_wlm_session_statistics.md | 363 + .../docs/Developerguide/gs_wlm_user_info.md | 88 + .../gs_wlm_user_resource_history.md | 144 + .../Developerguide/guc-parameter-usage.md | 10 + .../en/docs/Developerguide/guc-parameters.md | 55 + .../en/docs/Developerguide/ha-replication.md | 9 + .../Developerguide/highlighting-results.md | 64 + .../docs/Developerguide/hint-based-tuning.md | 17 + ...int-errors-conflicts-and-other-warnings.md | 63 + content/en/docs/Developerguide/i-o.md | 76 + .../en/docs/Developerguide/implementation.md | 18 + .../en/docs/Developerguide/implicit-cursor.md | 50 + .../en/docs/Developerguide/importing-data.md | 53 + content/en/docs/Developerguide/indexes.md | 20 + content/en/docs/Developerguide/insert.md | 160 + .../inserting-data-to-tables.md | 219 + content/en/docs/Developerguide/instance.md 
| 7 + .../en/docs/Developerguide/instance_time.md | 50 + ...ation-using-foreign-data-wrappers-(fdw).md | 29 + .../en/docs/Developerguide/introducing-mot.md | 17 + .../en/docs/Developerguide/introduction-20.md | 10 + .../en/docs/Developerguide/introduction-34.md | 10 + .../introduction-to-the-copymanager-class.md | 109 + .../introduction-to-the-sql-execution-plan.md | 7 + .../en/docs/Developerguide/introduction.md | 11 + .../docs/Developerguide/isolation-levels.md | 114 + .../docs/Developerguide/ispell-dictionary.md | 44 + .../java-sql-callablestatement.md | 135 + .../Developerguide/java-sql-connection.md | 119 + .../java-sql-databasemetadata.md | 438 ++ .../en/docs/Developerguide/java-sql-driver.md | 53 + .../java-sql-preparedstatement.md | 157 + .../docs/Developerguide/java-sql-resultset.md | 239 + .../java-sql-resultsetmetadata.md | 46 + .../docs/Developerguide/java-sql-statement.md | 106 + .../Developerguide/javax-naming-context.md | 88 + .../javax-naming-spi-initialcontextfactory.md | 25 + .../javax-sql-connectionpooldatasource.md | 60 + .../Developerguide/javax-sql-datasource.md | 60 + .../javax-sql-pooledconnection.md | 60 + .../jdbc-interface-reference.md | 4 + ...kage-driver-class-and-environment-class.md | 65 + content/en/docs/Developerguide/jdbc.md | 33 + content/en/docs/Developerguide/jit.md | 25 + .../Developerguide/join-operation-hints.md | 35 + .../docs/Developerguide/join-order-hints.md | 62 + .../en/docs/Developerguide/json-functions.md | 37 + content/en/docs/Developerguide/json-types.md | 6 + .../Developerguide/kernel-resource-usage.md | 34 + content/en/docs/Developerguide/keywords.md | 6298 +++++++++++++++++ content/en/docs/Developerguide/libpq.md | 15 + content/en/docs/Developerguide/limitations.md | 10 + ...-application-scenarios-and-restrictions.md | 35 + .../en/docs/Developerguide/load-management.md | 373 + .../docs/Developerguide/loading-the-driver.md | 13 + .../docs/Developerguide/local_rel_iostat.md | 46 + .../Developerguide/local_threadpool_status.md | 69 + content/en/docs/Developerguide/lock-25.md | 7 + content/en/docs/Developerguide/lock-39.md | 7 + .../en/docs/Developerguide/lock-management.md | 124 + .../en/docs/Developerguide/lock-operations.md | 4 + content/en/docs/Developerguide/lock.md | 293 + content/en/docs/Developerguide/locks.md | 138 + content/en/docs/Developerguide/log-replay.md | 68 + content/en/docs/Developerguide/logging-21.md | 62 + .../en/docs/Developerguide/logging-content.md | 389 + .../Developerguide/logging-destination.md | 162 + .../en/docs/Developerguide/logging-time.md | 183 + content/en/docs/Developerguide/logging.md | 10 + .../docs/Developerguide/logical-operators.md | 93 + .../logical-replication-functions.md | 127 + .../en/docs/Developerguide/loop-statements.md | 131 + .../Developerguide/maintaining-audit-logs.md | 196 + .../managing-concurrent-write-operations.md | 11 + .../managing-ssl-certificates.md | 9 + .../Developerguide/managing-transactions.md | 42 + .../managing-users-and-their-permissions.md | 21 + .../Developerguide/manipulating-queries.md | 51 + .../Developerguide/manipulating-tsvector.md | 26 + .../mathematical-functions-and-operators.md | 826 +++ content/en/docs/Developerguide/memory-24.md | 11 + content/en/docs/Developerguide/memory-25.md | 69 + content/en/docs/Developerguide/memory-26.md | 279 + content/en/docs/Developerguide/memory-38.md | 11 + content/en/docs/Developerguide/memory-40.md | 279 + .../memory-and-storage-planning.md | 9 + .../docs/Developerguide/memory-management.md | 4 + 
.../memory-optimized-storage-engine.md | 17 + .../en/docs/Developerguide/memory-planning.md | 112 + content/en/docs/Developerguide/memory.md | 69 + .../docs/Developerguide/memory_node_detail.md | 40 + content/en/docs/Developerguide/memsql.md | 24 + content/en/docs/Developerguide/merge-into.md | 136 + .../docs/Developerguide/microsoft-hekaton.md | 8 + .../miscellaneous-parameters.md | 387 + .../Developerguide/mode-matching-operators.md | 311 + content/en/docs/Developerguide/monetary.md | 47 + content/en/docs/Developerguide/monitoring.md | 116 + .../en/docs/Developerguide/mot-concepts.md | 29 + .../mot-configuration-settings.md | 47 + .../docs/Developerguide/mot-error-messages.md | 14 + .../docs/Developerguide/mot-introduction.md | 24 + .../Developerguide/mot-key-technologies.md | 20 + .../en/docs/Developerguide/mot-statistics.md | 10 + .../Developerguide/mot-table-limitations.md | 13 + content/en/docs/Developerguide/mot.md | 9 + content/en/docs/Developerguide/move.md | 75 + content/en/docs/Developerguide/mpp_tables.md | 60 + content/en/docs/Developerguide/network-17.md | 82 + content/en/docs/Developerguide/network-26.md | 60 + ...network-address-functions-and-operators.md | 452 ++ .../en/docs/Developerguide/network-address.md | 175 + content/en/docs/Developerguide/network.md | 60 + .../docs/Developerguide/non-unique-indexes.md | 8 + .../en/docs/Developerguide/null-statements.md | 20 + .../numa-awareness-allocation-and-affinity.md | 14 + .../docs/Developerguide/numeric-data-types.md | 417 ++ content/en/docs/Developerguide/nvme-disk.md | 8 + .../Developerguide/object-identifier-types.md | 191 + content/en/docs/Developerguide/object.md | 87 + .../obtaining-help-information-12.md | 31 + .../obtaining-help-information.md | 45 + .../occ-vs-2pl-differences-by-example.md | 196 + .../odbc-interface-reference.md | 4 + ...es-dependent-libraries-and-header-files.md | 6 + content/en/docs/Developerguide/odbc.md | 51 + .../en/docs/Developerguide/opengauss-sql.md | 31 + .../Developerguide/opengauss-transaction.md | 125 + .../docs/Developerguide/operation-auditing.md | 309 + content/en/docs/Developerguide/operator.md | 15 + .../docs/Developerguide/operator_history.md | 4 + .../Developerguide/operator_history_table.md | 173 + .../docs/Developerguide/operator_runtime.md | 180 + content/en/docs/Developerguide/operators.md | 82 + .../optimistic-concurrency-control.md | 37 + .../optimizer-cost-constants.md | 87 + .../optimizer-method-configuration.md | 335 + .../optimizing-concurrent-queue-parameters.md | 20 + .../optimizing-database-memory-parameters.md | 26 + .../optimizing-database-parameters.md | 9 + .../Developerguide/optimizing-operators.md | 54 + .../optimizing-os-parameters.md | 152 + .../optimizing-sql-self-diagnosis.md | 54 + .../Developerguide/optimizing-statistics.md | 93 + .../Developerguide/optimizing-subqueries.md | 501 ++ ...le-in-memory-option-and-oracle-timesten.md | 30 + ...inal-information-table-of-wdr-snapshots.md | 7 + .../Developerguide/os-environment-settings.md | 93 + .../docs/Developerguide/os-kernel-and-boot.md | 82 + content/en/docs/Developerguide/os.md | 11 + content/en/docs/Developerguide/os_runtime.md | 53 + content/en/docs/Developerguide/os_threads.md | 53 + .../other-default-parameters.md | 48 + ...ther-factors-affecting-llvm-performance.md | 17 + .../en/docs/Developerguide/other-functions.md | 250 + .../docs/Developerguide/other-operations.md | 13 + .../Developerguide/other-optimizer-options.md | 327 + .../docs/Developerguide/other-statements.md | 7 + 
.../Developerguide/other-system-functions.md | 2310 ++++++ content/en/docs/Developerguide/overview-10.md | 4 + content/en/docs/Developerguide/overview-15.md | 46 + content/en/docs/Developerguide/overview-16.md | 27 + content/en/docs/Developerguide/overview-17.md | 9 + content/en/docs/Developerguide/overview-18.md | 47 + content/en/docs/Developerguide/overview-19.md | 33 + content/en/docs/Developerguide/overview-2.md | 471 ++ content/en/docs/Developerguide/overview-23.md | 36 + content/en/docs/Developerguide/overview-24.md | 46 + content/en/docs/Developerguide/overview-27.md | 27 + content/en/docs/Developerguide/overview-28.md | 9 + content/en/docs/Developerguide/overview-29.md | 47 + content/en/docs/Developerguide/overview-31.md | 33 + content/en/docs/Developerguide/overview-37.md | 36 + content/en/docs/Developerguide/overview-5.md | 77 + content/en/docs/Developerguide/overview-7.md | 6 + content/en/docs/Developerguide/overview-8.md | 4 + ...iew-of-system-catalogs-and-system-views.md | 13 + content/en/docs/Developerguide/overview.md | 11 + .../Developerguide/parallel-data-import.md | 52 + content/en/docs/Developerguide/parser.md | 338 + .../docs/Developerguide/parsing-documents.md | 46 + .../en/docs/Developerguide/parsing-queries.md | 68 + .../Developerguide/performance-benchmarks.md | 5 + .../performance-metric-tpm-c.md | 37 + .../Developerguide/performance-statistics.md | 28 + .../docs/Developerguide/performance-tuning.md | 11 + ...table-and-truncating-the-original-table.md | 36 + ...y-using-the-create-table-like-statement.md | 31 + ...opy-by-using-the-create-table-statement.md | 40 + .../en/docs/Developerguide/pg_aggregate.md | 111 + content/en/docs/Developerguide/pg_am.md | 299 + content/en/docs/Developerguide/pg_amop.md | 105 + content/en/docs/Developerguide/pg_amproc.md | 77 + .../pg_app_workloadgroup_mapping.md | 39 + content/en/docs/Developerguide/pg_attrdef.md | 53 + .../en/docs/Developerguide/pg_attribute.md | 193 + .../en/docs/Developerguide/pg_auth_history.md | 46 + .../en/docs/Developerguide/pg_auth_members.md | 46 + content/en/docs/Developerguide/pg_authid.md | 189 + .../pg_available_extension_versions.md | 74 + .../Developerguide/pg_available_extensions.md | 46 + content/en/docs/Developerguide/pg_cast.md | 62 + content/en/docs/Developerguide/pg_class.md | 379 + .../en/docs/Developerguide/pg_collation.md | 83 + .../en/docs/Developerguide/pg_constraint.md | 217 + .../en/docs/Developerguide/pg_conversion.md | 92 + content/en/docs/Developerguide/pg_cursors.md | 60 + content/en/docs/Developerguide/pg_database.md | 124 + .../docs/Developerguide/pg_db_role_setting.md | 39 + .../en/docs/Developerguide/pg_default_acl.md | 53 + content/en/docs/Developerguide/pg_depend.md | 93 + .../en/docs/Developerguide/pg_description.md | 58 + .../en/docs/Developerguide/pg_directory.md | 53 + content/en/docs/Developerguide/pg_enum.md | 60 + .../en/docs/Developerguide/pg_ext_stats.md | 140 + .../en/docs/Developerguide/pg_extension.md | 67 + .../pg_extension_data_source.md | 83 + .../Developerguide/pg_foreign_data_wrapper.md | 83 + .../docs/Developerguide/pg_foreign_server.md | 92 + .../docs/Developerguide/pg_foreign_table.md | 46 + .../Developerguide/pg_get_invalid_backends.md | 53 + .../pg_get_senders_catchup_time.md | 74 + content/en/docs/Developerguide/pg_group.md | 39 + content/en/docs/Developerguide/pg_index.md | 152 + content/en/docs/Developerguide/pg_indexes.md | 65 + content/en/docs/Developerguide/pg_inherits.md | 47 + content/en/docs/Developerguide/pg_job.md | 140 + 
content/en/docs/Developerguide/pg_job_proc.md | 39 + content/en/docs/Developerguide/pg_language.md | 101 + .../en/docs/Developerguide/pg_largeobject.md | 51 + .../Developerguide/pg_largeobject_metadata.md | 47 + content/en/docs/Developerguide/pg_locks.md | 165 + .../en/docs/Developerguide/pg_namespace.md | 53 + content/en/docs/Developerguide/pg_node_env.md | 67 + content/en/docs/Developerguide/pg_object.md | 61 + content/en/docs/Developerguide/pg_opclass.md | 105 + content/en/docs/Developerguide/pg_operator.md | 155 + content/en/docs/Developerguide/pg_opfamily.md | 69 + .../en/docs/Developerguide/pg_os_threads.md | 53 + .../en/docs/Developerguide/pg_partition.md | 225 + .../en/docs/Developerguide/pg_pltemplate.md | 74 + .../Developerguide/pg_prepared_statements.md | 53 + .../docs/Developerguide/pg_prepared_xacts.md | 65 + content/en/docs/Developerguide/pg_proc.md | 236 + content/en/docs/Developerguide/pg_range.md | 76 + .../Developerguide/pg_replication_slots.md | 88 + .../docs/Developerguide/pg_resource_pool.md | 109 + content/en/docs/Developerguide/pg_rewrite.md | 83 + .../en/docs/Developerguide/pg_rlspolicies.md | 67 + .../en/docs/Developerguide/pg_rlspolicy.md | 67 + content/en/docs/Developerguide/pg_roles.md | 236 + content/en/docs/Developerguide/pg_rules.md | 46 + content/en/docs/Developerguide/pg_seclabel.md | 67 + .../en/docs/Developerguide/pg_seclabels.md | 92 + .../docs/Developerguide/pg_session_iostat.md | 83 + .../docs/Developerguide/pg_session_wlmstat.md | 188 + content/en/docs/Developerguide/pg_settings.md | 130 + content/en/docs/Developerguide/pg_shadow.md | 184 + content/en/docs/Developerguide/pg_shdepend.md | 111 + .../docs/Developerguide/pg_shdescription.md | 51 + .../en/docs/Developerguide/pg_shseclabel.md | 62 + .../docs/Developerguide/pg_stat_activity.md | 178 + .../Developerguide/pg_stat_all_indexes.md | 76 + .../docs/Developerguide/pg_stat_all_tables.md | 172 + .../docs/Developerguide/pg_stat_bad_block.md | 81 + .../docs/Developerguide/pg_stat_bgwriter.md | 95 + .../docs/Developerguide/pg_stat_database.md | 151 + .../pg_stat_database_conflicts.md | 67 + .../Developerguide/pg_stat_replication.md | 123 + .../Developerguide/pg_stat_sys_indexes.md | 74 + .../docs/Developerguide/pg_stat_sys_tables.md | 172 + .../Developerguide/pg_stat_user_functions.md | 60 + .../Developerguide/pg_stat_user_indexes.md | 74 + .../Developerguide/pg_stat_user_tables.md | 172 + .../Developerguide/pg_stat_xact_all_tables.md | 95 + .../Developerguide/pg_stat_xact_sys_tables.md | 95 + .../pg_stat_xact_user_functions.md | 60 + .../pg_stat_xact_user_tables.md | 95 + .../Developerguide/pg_statio_all_indexes.md | 67 + .../Developerguide/pg_statio_all_sequences.md | 53 + .../Developerguide/pg_statio_all_tables.md | 95 + .../Developerguide/pg_statio_sys_indexes.md | 67 + .../Developerguide/pg_statio_sys_sequences.md | 53 + .../Developerguide/pg_statio_sys_tables.md | 95 + .../Developerguide/pg_statio_user_indexes.md | 67 + .../pg_statio_user_sequences.md | 53 + .../Developerguide/pg_statio_user_tables.md | 95 + .../en/docs/Developerguide/pg_statistic.md | 115 + .../docs/Developerguide/pg_statistic_ext.md | 115 + content/en/docs/Developerguide/pg_stats.md | 157 + content/en/docs/Developerguide/pg_tables.md | 83 + .../en/docs/Developerguide/pg_tablespace.md | 67 + content/en/docs/Developerguide/pg_tde_info.md | 41 + .../Developerguide/pg_thread_wait_status.md | 1306 ++++ .../Developerguide/pg_timezone_abbrevs.md | 39 + .../docs/Developerguide/pg_timezone_names.md | 46 + 
.../pg_total_user_resource_info.md | 143 + .../pg_total_user_resource_info_oid.md | 95 + content/en/docs/Developerguide/pg_trigger.md | 133 + .../en/docs/Developerguide/pg_ts_config.md | 76 + .../docs/Developerguide/pg_ts_config_map.md | 56 + content/en/docs/Developerguide/pg_ts_dict.md | 76 + .../en/docs/Developerguide/pg_ts_parser.md | 92 + .../en/docs/Developerguide/pg_ts_template.md | 65 + content/en/docs/Developerguide/pg_type.md | 244 + content/en/docs/Developerguide/pg_user.md | 151 + .../en/docs/Developerguide/pg_user_mapping.md | 58 + .../docs/Developerguide/pg_user_mappings.md | 76 + .../en/docs/Developerguide/pg_user_status.md | 61 + .../docs/Developerguide/pg_variable_info.md | 95 + content/en/docs/Developerguide/pg_views.md | 56 + .../docs/Developerguide/pg_wlm_statistics.md | 83 + .../docs/Developerguide/pg_workload_group.md | 46 + .../docs/Developerguide/pl-pgsql-functions.md | 20 + .../Developerguide/plan-hint-optimization.md | 260 + content/en/docs/Developerguide/plan_table.md | 87 + .../en/docs/Developerguide/plan_table_data.md | 100 + .../planning-a-storage-model.md | 119 + .../platform-and-client-compatibility.md | 53 + ...ntial-deadlocks-during-concurrent-write.md | 7 + content/en/docs/Developerguide/pqcancel.md | 54 + content/en/docs/Developerguide/pqclear.md | 39 + content/en/docs/Developerguide/pqconnectdb.md | 44 + .../docs/Developerguide/pqconnectdbparams.md | 55 + .../en/docs/Developerguide/pqconnectstart.md | 35 + .../en/docs/Developerguide/pqconninfoparse.md | 40 + .../en/docs/Developerguide/pqerrormessage.md | 39 + content/en/docs/Developerguide/pqexec.md | 51 + .../en/docs/Developerguide/pqexecparams.md | 77 + .../docs/Developerguide/pqexecparamsbatch.md | 83 + .../en/docs/Developerguide/pqexecprepared.md | 71 + .../Developerguide/pqexecpreparedbatch.md | 77 + content/en/docs/Developerguide/pqfinish.md | 39 + content/en/docs/Developerguide/pqflush.md | 43 + content/en/docs/Developerguide/pqfname.md | 45 + .../en/docs/Developerguide/pqfreecancel.md | 39 + content/en/docs/Developerguide/pqgetcancel.md | 43 + content/en/docs/Developerguide/pqgetvalue.md | 55 + content/en/docs/Developerguide/pqnfields.md | 39 + content/en/docs/Developerguide/pqntuples.md | 39 + content/en/docs/Developerguide/pqprepare.md | 71 + content/en/docs/Developerguide/pqreset.md | 39 + .../en/docs/Developerguide/pqresultstatus.md | 78 + .../en/docs/Developerguide/pqsendprepare.md | 67 + content/en/docs/Developerguide/pqsendquery.md | 48 + .../docs/Developerguide/pqsendqueryparams.md | 85 + .../Developerguide/pqsendqueryprepared.md | 79 + .../en/docs/Developerguide/pqsetdblogin.md | 80 + content/en/docs/Developerguide/pqstatus.md | 69 + content/en/docs/Developerguide/predictor.md | 13 + content/en/docs/Developerguide/preparation.md | 7 + .../Developerguide/prepare-transaction.md | 37 + content/en/docs/Developerguide/prepare.md | 41 + .../docs/Developerguide/prerequisite-check.md | 10 + .../en/docs/Developerguide/prerequisites.md | 41 + .../en/docs/Developerguide/primary-server.md | 144 + .../processing-data-in-a-result-set.md | 186 + .../en/docs/Developerguide/pseudo-types.md | 122 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes content/en/docs/Developerguide/query-28.md | 
74 + content/en/docs/Developerguide/query-43.md | 74 + .../query-and-index-statistics-collector.md | 144 + .../Developerguide/query-execution-process.md | 75 + .../query-native-compilation-(jit).md | 8 + .../query-native-compilation-orange.md | 26 + .../en/docs/Developerguide/query-planning.md | 18 + .../query-request-handling-process.md | 5 + content/en/docs/Developerguide/query.md | 31 + .../Developerguide/querying-audit-results.md | 62 + ...statements-that-affect-performance-most.md | 83 + .../querying-system-catalogs.md | 137 + .../range-functions-and-operators.md | 394 ++ .../Developerguide/ranking-search-results.md | 102 + .../en/docs/Developerguide/reassign-owned.md | 34 + .../recommended-suggestions-for-llvm.md | 12 + content/en/docs/Developerguide/record.md | 114 + content/en/docs/Developerguide/recovery-18.md | 8 + content/en/docs/Developerguide/recovery-23.md | 18 + content/en/docs/Developerguide/recovery.md | 7 + content/en/docs/Developerguide/redo-log.md | 20 + content/en/docs/Developerguide/reindex.md | 139 + .../docs/Developerguide/release-savepoint.md | 63 + .../Developerguide/replacing-certificates.md | 62 + .../replication-and-high-availability.md | 12 + .../docs/Developerguide/replication_slots.md | 88 + .../docs/Developerguide/replication_stat.md | 123 + content/en/docs/Developerguide/reset.md | 63 + ...etting-key-parameters-during-sql-tuning.md | 74 + .../Developerguide/resetting-parameters.md | 635 ++ .../Developerguide/resource-consumption.md | 15 + .../result-linear-scale-up-many-core.md | 16 + .../en/docs/Developerguide/results-report.md | 12 + .../docs/Developerguide/retry-management.md | 21 + .../retrying-a-aborted-transaction-orange.md | 36 + .../return-next-and-return-query.md | 64 + .../docs/Developerguide/return-statements.md | 9 + content/en/docs/Developerguide/return.md | 17 + ...iewing-and-modifying-a-table-definition.md | 13 + content/en/docs/Developerguide/revoke.md | 149 + .../docs/Developerguide/rewriting-queries.md | 63 + content/en/docs/Developerguide/roles.md | 20 + .../docs/Developerguide/rollback-prepared.md | 28 + .../Developerguide/rollback-to-savepoint.md | 53 + content/en/docs/Developerguide/rollback.md | 38 + .../en/docs/Developerguide/row-expressions.md | 22 + .../row-level-access-control.md | 83 + content/en/docs/Developerguide/rows-hints.md | 42 + .../Developerguide/running-sql-statements.md | 162 + ...opy-from-stdin-statement-to-import-data.md | 11 + ...ing-the-insert-statement-to-insert-data.md | 15 + .../docs/Developerguide/sample-workloads.md | 7 + content/en/docs/Developerguide/savepoint.md | 93 + content/en/docs/Developerguide/scalability.md | 49 + .../scale-out-distributed-database.md | 34 + content/en/docs/Developerguide/scale-out.md | 8 + .../Developerguide/scale-up-architecture.md | 16 + .../Developerguide/scan-operation-hints.md | 42 + content/en/docs/Developerguide/schemas.md | 63 + ...eamless-integration-of-mot-with-gaussdb.md | 16 + .../docs/Developerguide/searching-a-table.md | 86 + .../Developerguide/secondary-index-support.md | 4 + ...ty-and-authentication-(postgresql-conf).md | 381 + .../docs/Developerguide/security-functions.md | 293 + content/en/docs/Developerguide/select-into.md | 69 + content/en/docs/Developerguide/select.md | 591 ++ .../Developerguide/selecting-a-data-type.md | 17 + .../selecting-a-storage-model.md | 27 + .../en/docs/Developerguide/sending-server.md | 115 + .../Developerguide/separation-of-duties.md | 188 + .../docs/Developerguide/sequence-functions.md | 122 + 
...rver-optimization-arm-huawei-taishan-4p.md | 20 + .../Developerguide/server-optimization-x86.md | 28 + .../Developerguide/server-signal-functions.md | 53 + .../en/docs/Developerguide/session-thread.md | 37 + .../Developerguide/session_cpu_runtime.md | 88 + .../en/docs/Developerguide/session_memory.md | 46 + .../Developerguide/session_memory_detail.md | 74 + .../Developerguide/session_memory_runtime.md | 89 + .../en/docs/Developerguide/session_stat.md | 53 + .../Developerguide/session_stat_activity.md | 163 + .../en/docs/Developerguide/session_time.md | 46 + .../en/docs/Developerguide/set-constraints.md | 54 + .../Developerguide/set-returning-functions.md | 130 + content/en/docs/Developerguide/set-role.md | 72 + .../set-session-authorization.md | 76 + .../en/docs/Developerguide/set-transaction.md | 59 + content/en/docs/Developerguide/set.md | 116 + .../setting-account-security-policies.md | 133 + .../setting-password-security-policies.md | 518 ++ .../setting-security-policies.md | 9 + ...tting-the-validity-period-of-an-account.md | 57 + .../setting-up-and-running-benchmarksql.md | 29 + .../setting-user-permissions.md | 43 + content/en/docs/Developerguide/settings.md | 190 + .../Developerguide/shared_memory_detail.md | 60 + content/en/docs/Developerguide/show.md | 42 + .../docs/Developerguide/simple-dictionary.md | 59 + .../docs/Developerguide/simple-expressions.md | 98 + .../docs/Developerguide/snapshot-snapshot.md | 47 + .../snapshot-synchronization-functions.md | 18 + .../snapshot-tables_snap_timestamp.md | 65 + .../Developerguide/snowball-dictionary.md | 8 + .../sql-coverage-and-limitations.md | 19 + .../docs/Developerguide/sql-optimization.md | 25 + .../en/docs/Developerguide/sql-reference.md | 33 + content/en/docs/Developerguide/sql-syntax.md | 235 + .../en/docs/Developerguide/sqlallocconnect.md | 4 + content/en/docs/Developerguide/sqlallocenv.md | 4 + .../en/docs/Developerguide/sqlallochandle.md | 61 + .../en/docs/Developerguide/sqlallocstmt.md | 4 + content/en/docs/Developerguide/sqlbindcol.md | 76 + .../docs/Developerguide/sqlbindparameter.md | 100 + .../en/docs/Developerguide/sqlcolattribute.md | 82 + content/en/docs/Developerguide/sqlconnect.md | 83 + content/en/docs/Developerguide/sqldiag.md | 19 + .../en/docs/Developerguide/sqldisconnect.md | 46 + .../en/docs/Developerguide/sqlexecdirect.md | 61 + content/en/docs/Developerguide/sqlexecute.md | 49 + content/en/docs/Developerguide/sqlfetch.md | 48 + .../en/docs/Developerguide/sqlfreeconnect.md | 4 + content/en/docs/Developerguide/sqlfreeenv.md | 4 + .../en/docs/Developerguide/sqlfreehandle.md | 54 + content/en/docs/Developerguide/sqlfreestmt.md | 4 + content/en/docs/Developerguide/sqlgetdata.md | 78 + .../en/docs/Developerguide/sqlgetdiagrec.md | 159 + content/en/docs/Developerguide/sqlprepare.md | 59 + .../docs/Developerguide/sqlsetconnectattr.md | 64 + .../en/docs/Developerguide/sqlsetenvattr.md | 65 + .../en/docs/Developerguide/sqlsetstmtattr.md | 64 + ...dby-node-in-the-need-repair-(wal)-state.md | 14 + .../en/docs/Developerguide/standby-server.md | 122 + .../docs/Developerguide/start-transaction.md | 83 + .../docs/Developerguide/stat_all_indexes.md | 74 + .../en/docs/Developerguide/stat_all_tables.md | 165 + .../en/docs/Developerguide/stat_bad_block.md | 81 + .../en/docs/Developerguide/stat_database.md | 151 + .../Developerguide/stat_database_conflicts.md | 67 + .../docs/Developerguide/stat_sys_indexes.md | 74 + .../en/docs/Developerguide/stat_sys_tables.md | 165 + .../Developerguide/stat_user_functions.md | 60 + 
.../docs/Developerguide/stat_user_indexes.md | 74 + .../docs/Developerguide/stat_user_tables.md | 165 + .../Developerguide/stat_xact_all_tables.md | 95 + .../Developerguide/stat_xact_sys_tables.md | 95 + .../stat_xact_user_functions.md | 60 + .../Developerguide/stat_xact_user_tables.md | 95 + .../docs/Developerguide/statement-behavior.md | 247 + content/en/docs/Developerguide/statement.md | 228 + .../statement_complex_history.md | 4 + .../statement_complex_history_table.md | 4 + .../statement_complex_runtime.md | 356 + .../en/docs/Developerguide/statement_count.md | 203 + .../statement_iostat_complex_runtime.md | 81 + .../statement_responsetime_percentile.md | 32 + .../statement_user_complex_history.md | 4 + .../statement_wlmstat_complex_runtime.md | 181 + .../docs/Developerguide/statio_all_indexes.md | 67 + .../Developerguide/statio_all_sequences.md | 53 + .../docs/Developerguide/statio_all_tables.md | 95 + .../docs/Developerguide/statio_sys_indexes.md | 67 + .../Developerguide/statio_sys_sequences.md | 53 + .../docs/Developerguide/statio_sys_tables.md | 95 + .../Developerguide/statio_user_indexes.md | 67 + .../Developerguide/statio_user_sequences.md | 53 + .../docs/Developerguide/statio_user_tables.md | 95 + .../statistics-during-the-database-running.md | 7 + .../statistics-information-functions.md | 1478 ++++ content/en/docs/Developerguide/statistics.md | 51 + content/en/docs/Developerguide/stop-words.md | 27 + content/en/docs/Developerguide/storage-io.md | 38 + content/en/docs/Developerguide/storage.md | 7 + .../Developerguide/stored-procedure-21.md | 10 + .../Developerguide/stored-procedure-35.md | 10 + .../docs/Developerguide/stored-procedure.md | 27 + .../docs/Developerguide/sublink-name-hints.md | 31 + content/en/docs/Developerguide/subprogram.md | 4 + .../Developerguide/subquery-expressions.md | 135 + .../Developerguide/summary_file_iostat.md | 109 + .../summary_file_redo_iostat.md | 67 + .../docs/Developerguide/summary_rel_iostat.md | 46 + .../summary_stat_all_indexes.md | 60 + .../Developerguide/summary_stat_all_tables.md | 158 + .../Developerguide/summary_stat_bad_block.md | 67 + .../Developerguide/summary_stat_database.md | 144 + .../summary_stat_database_conflicts.md | 60 + .../summary_stat_sys_indexes.md | 60 + .../Developerguide/summary_stat_sys_tables.md | 158 + .../summary_stat_user_functions.md | 53 + .../summary_stat_user_indexes.md | 60 + .../summary_stat_user_tables.md | 158 + .../summary_stat_xact_all_tables.md | 88 + .../summary_stat_xact_sys_tables.md | 88 + .../summary_stat_xact_user_functions.md | 53 + .../summary_stat_xact_user_tables.md | 88 + .../docs/Developerguide/summary_statement.md | 228 + .../Developerguide/summary_statement_count.md | 193 + .../summary_statio_all_indexes.md | 53 + .../summary_statio_all_sequences.md | 46 + .../summary_statio_all_tables.md | 88 + .../summary_statio_sys_indexes.md | 53 + .../summary_statio_sys_sequences.md | 46 + .../summary_statio_sys_tables.md | 88 + .../summary_statio_user_indexes.md | 53 + .../summary_statio_user_sequences.md | 46 + .../summary_statio_user_tables.md | 88 + .../summary_transactions_prepared_xacts.md | 53 + .../docs/Developerguide/summary_user_login.md | 53 + .../summary_workload_sql_count.md | 81 + .../summary_workload_sql_elapse_time.md | 144 + .../summary_workload_transaction.md | 109 + .../docs/Developerguide/synonym-dictionary.md | 111 + .../system-administration-functions.md | 21 + .../system-catalogs-and-system-views.md | 9 + .../en/docs/Developerguide/system-catalogs.md | 145 + 
.../system-information-functions.md | 1150 +++ .../system-level-optimization.md | 4 + .../docs/Developerguide/system-operation.md | 28 + .../Developerguide/system-optimization.md | 11 + .../system-performance-snapshot.md | 45 + .../en/docs/Developerguide/system-views.md | 177 + .../docs/Developerguide/tables-and-indexes.md | 9 + .../Developerguide/technical-requirements.md | 14 + .../Developerguide/testing-a-configuration.md | 59 + .../Developerguide/testing-a-dictionary.md | 23 + .../docs/Developerguide/testing-a-parser.md | 63 + .../testing-and-debugging-text-search.md | 9 + .../text-search-functions-and-operators.md | 516 ++ .../docs/Developerguide/text-search-types.md | 162 + .../Developerguide/thesaurus-dictionary.md | 87 + .../docs/Developerguide/thread_wait_status.md | 102 + .../en/docs/Developerguide/tpc-c-benchmark.md | 106 + .../Developerguide/transaction-isolation.md | 13 + content/en/docs/Developerguide/transaction.md | 9 + .../transactions_prepared_xacts.md | 53 + .../docs/Developerguide/trigger-functions.md | 37 + .../docs/Developerguide/troubleshooting-14.md | 7 + .../en/docs/Developerguide/troubleshooting.md | 5 + content/en/docs/Developerguide/truncate.md | 123 + .../en/docs/Developerguide/tuning-process.md | 12 + .../type-conversion-functions.md | 588 ++ .../en/docs/Developerguide/type-conversion.md | 13 + .../typical-sql-optimization-methods.md | 13 + .../union-case-and-related-constructs.md | 115 + .../universal-file-access-functions.md | 143 + .../Developerguide/unsupported-data-types.md | 35 + .../docs/Developerguide/unsupported-dmls.md | 9 + .../Developerguide/unsupported-features.md | 12 + .../unsupported-index-ddls-and-index.md | 7 + ...r-native-compilation-and-lite-execution.md | 21 + .../Developerguide/unsupported-table-ddls.md | 9 + content/en/docs/Developerguide/update.md | 124 + ...pdating-a-table-by-using-dml-statements.md | 70 + ...-data-by-using-the-merge-into-statement.md | 143 + .../updating-data-in-a-table-4.md | 7 + .../updating-data-in-a-table.md | 40 + .../Developerguide/updating-statistics.md | 41 + .../docs/Developerguide/upgrade-parameters.md | 37 + .../en/docs/Developerguide/usage-guide-11.md | 75 + .../en/docs/Developerguide/usage-guide-9.md | 30 + content/en/docs/Developerguide/usage-guide.md | 207 + .../en/docs/Developerguide/usage-scenarios.md | 16 + content/en/docs/Developerguide/usage.md | 27 + .../user-and-permission-audit.md | 73 + .../Developerguide/user-defined-functions.md | 5 + content/en/docs/Developerguide/user_login.md | 53 + content/en/docs/Developerguide/users.md | 47 + ...sing-a-gsql-meta-command-to-import-data.md | 199 + .../Developerguide/using-csv-log-output.md | 197 + ...g-gs_dump-and-gs_dumpall-to-export-data.md | 11 + .../using-gs_restore-to-import-data.md | 306 + .../using-gsql-to-connect-to-a-database.md | 114 + content/en/docs/Developerguide/using-mot.md | 17 + .../using-partitioned-tables.md | 12 + content/en/docs/Developerguide/using-pcks.md | 9 + content/en/docs/Developerguide/utility.md | 33 + content/en/docs/Developerguide/uuid-type.md | 23 + content/en/docs/Developerguide/vacuum-33.md | 119 + content/en/docs/Developerguide/vacuum.md | 119 + .../en/docs/Developerguide/value-storage.md | 30 + content/en/docs/Developerguide/values.md | 62 + .../version-and-platform-compatibility.md | 7 + .../en/docs/Developerguide/viewing-data.md | 51 + .../viewing-parameter-values.md | 70 + content/en/docs/Developerguide/voltdb.md | 12 + .../en/docs/Developerguide/wait-events-27.md | 15 + .../en/docs/Developerguide/wait-events-42.md 
| 15 + content/en/docs/Developerguide/wait-events.md | 7 + content/en/docs/Developerguide/wait_events.md | 81 + .../Developerguide/wdr-snapshot-data-table.md | 6 + .../Developerguide/wdr-snapshot-schema.md | 11 + .../docs/Developerguide/what-is-a-document.md | 27 + ...cted-is-full-the-tpc-c-stops-responding.md | 14 + .../docs/Developerguide/window-functions.md | 625 ++ .../wlm_user_resource_config.md | 88 + .../wlm_user_resource_runtime.md | 95 + .../docs/Developerguide/workflow-overview.md | 19 + .../Developerguide/working-with-databases.md | 19 + .../docs/Developerguide/workload-manager.md | 7 + content/en/docs/Developerguide/workload.md | 17 + .../docs/Developerguide/workload_sql_count.md | 74 + .../workload_sql_elapse_time.md | 137 + .../Developerguide/workload_transaction.md | 109 + .../en/docs/Developerguide/write-ahead-log.md | 11 + .../write-and-read-write-operations.md | 13 + content/en/docs/Developerguide/x-tuner.md | 13 + .../Developerguide/zone-and-formatting.md | 178 + content/en/docs/Glossary/Glossary.md | 641 ++ .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes ...l)-setting-the-standby-node-to-readable.md | 10 + content/en/docs/Quickstart/Quickstart.md | 3 + content/en/docs/Quickstart/alarm-detection.md | 47 + content/en/docs/Quickstart/archiving.md | 50 + .../Quickstart/asynchronous-i-o-operations.md | 111 + content/en/docs/Quickstart/audit-switch.md | 107 + content/en/docs/Quickstart/auditing.md | 9 + .../en/docs/Quickstart/automatic-vacuuming.md | 192 + .../en/docs/Quickstart/background-writer.md | 49 + .../Quickstart/checking-the-health-status.md | 61 + content/en/docs/Quickstart/checkpoints.md | 111 + .../communication-library-parameters.md | 215 + .../compatibility-with-earlier-versions.md | 127 + ...n-file-for-primary-secondary-deployment.md | 47 + ...ion-file-for-single-instance-deployment.md | 37 + .../configuring-a-whitelist-using-gs_guc.md | 52 + .../configuring-opengauss-parameters.md | 7 + .../Quickstart/configuring-os-parameters.md | 503 ++ ...uring-parameters-in-configuration-files.md | 75 + ...uring-primary-database-node-information.md | 89 + .../configuring-running-parameters.md | 7 + .../configuring-the-basic-host-information.md | 72 + ...uring-the-database-name-and-directories.md | 106 + ...onfiguring-the-locale-and-character-set.md | 119 + .../confirming-connection-information.md | 48 + .../connecting-to-a-database-locally.md | 54 + .../connecting-to-a-database-remotely.md | 7 + .../Quickstart/connecting-to-a-database.md | 140 + .../connection-and-authentication.md | 9 + .../Quickstart/connection-pool-parameters.md | 37 + .../en/docs/Quickstart/connection-settings.md | 143 + .../Quickstart/cost-based-vacuum-delay.md | 69 + .../creating-a-configuration-file.md | 15 + .../en/docs/Quickstart/creating-a-database.md | 78 + content/en/docs/Quickstart/creating-a-role.md | 71 + .../en/docs/Quickstart/creating-a-table.md | 634 ++ content/en/docs/Quickstart/creating-a-user.md | 84 + ...onfiguring-the-installation-environment.md | 361 + .../default-settings-of-client-connection.md | 9 + .../en/docs/Quickstart/developer-options.md | 662 ++ .../Quickstart/disabling-the-os-firewall.md | 97 + .../Quickstart/disabling-the-swap-memory.md | 8 + 
content/en/docs/Quickstart/disk-space.md | 29 + .../Quickstart/en-us_bookmap_0241499761.md | 161 + .../Quickstart/error-reporting-and-logging.md | 11 + .../establishing-mutual-trust-manually.md | 310 + content/en/docs/Quickstart/examples-0.md | 174 + content/en/docs/Quickstart/examples.md | 7 + .../Quickstart/executing-an-sql-statement.md | 48 + .../Quickstart/executing-files-in-batches.md | 22 + .../docs/Quickstart/executing-installation.md | 212 + .../executing-sql-statements-using-jdbc.md | 11 + ...ng-sql-statements-using-the-client-tool.md | 7 + content/en/docs/Quickstart/faqs.md | 5 + content/en/docs/Quickstart/fault-tolerance.md | 99 + .../figures/installation-process.png | Bin 0 -> 137979 bytes ...4\347\275\221\347\244\272\344\276\213.png" | Bin 0 -> 34995 bytes ...\346\236\266\346\236\204\345\233\2761.png" | Bin 0 -> 14080 bytes .../Quickstart/figures/typical-networking.png | Bin 0 -> 25211 bytes content/en/docs/Quickstart/file-location.md | 63 + .../Quickstart/genetic-query-optimizer.md | 99 + .../docs/Quickstart/granting-permissions.md | 259 + .../en/docs/Quickstart/guc-parameter-usage.md | 10 + content/en/docs/Quickstart/guc-parameters.md | 55 + content/en/docs/Quickstart/ha-replication.md | 9 + .../docs/Quickstart/initial-configuration.md | 7 + ...itializing-the-installation-environment.md | 11 + .../docs/Quickstart/installation-process.md | 56 + .../installation-user-and-user-group.md | 37 + ...sql-client-and-connecting-to-a-database.md | 68 + .../Quickstart/installing-the-opengauss.md | 15 + .../jdbc-package-and-driver-class.md | 15 + .../docs/Quickstart/kernel-resource-usage.md | 34 + .../Quickstart/learning-product-knowledge.md | 11 + content/en/docs/Quickstart/load-management.md | 373 + .../en/docs/Quickstart/loading-the-driver.md | 13 + content/en/docs/Quickstart/lock-management.md | 124 + content/en/docs/Quickstart/log-replay.md | 68 + content/en/docs/Quickstart/logging-content.md | 389 + .../en/docs/Quickstart/logging-destination.md | 162 + content/en/docs/Quickstart/logging-time.md | 183 + content/en/docs/Quickstart/memory.md | 279 + .../Quickstart/miscellaneous-parameters.md | 387 + .../Quickstart/modifying-os-configuration.md | 13 + ...g-and-verifying-an-installation-package.md | 55 + .../docs/Quickstart/opengauss-transaction.md | 125 + .../en/docs/Quickstart/operation-auditing.md | 309 + .../Quickstart/optimizer-cost-constants.md | 87 + .../optimizer-method-configuration.md | 335 + .../Quickstart/other-default-parameters.md | 48 + .../Quickstart/other-optimizer-options.md | 327 + .../docs/Quickstart/parallel-data-import.md | 52 + .../docs/Quickstart/performance-statistics.md | 28 + .../platform-and-client-compatibility.md | 53 + .../Quickstart/preparing-for-installation.md | 13 + ...e-and-hardware-installation-environment.md | 11 + content/en/docs/Quickstart/primary-server.md | 144 + .../en/docs/Quickstart/product-features.md | 19 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes .../query-and-index-statistics-collector.md | 144 + content/en/docs/Quickstart/query-planning.md | 18 + content/en/docs/Quickstart/query.md | 74 + .../docs/Quickstart/resetting-parameters.md | 635 ++ .../docs/Quickstart/resource-consumption.md | 15 + 
...ty-and-authentication-(postgresql-conf).md | 381 + content/en/docs/Quickstart/sending-server.md | 115 + .../setting-a-client-authentication-policy.md | 94 + .../setting-character-set-parameters.md | 8 + .../setting-remote-login-of-user-root.md | 60 + .../en/docs/Quickstart/setting-the-nic-mtu.md | 8 + .../setting-the-time-zone-and-time.md | 20 + content/en/docs/Quickstart/settings.md | 190 + .../docs/Quickstart/simple-data-management.md | 9 + .../simple-permission-management.md | 9 + .../software-and-hardware-requirements.md | 136 + .../docs/Quickstart/software-architecture.md | 57 + content/en/docs/Quickstart/standby-server.md | 122 + .../en/docs/Quickstart/statement-behavior.md | 247 + .../statistics-during-the-database-running.md | 7 + .../Quickstart/system-performance-snapshot.md | 45 + .../en/docs/Quickstart/typical-networking.md | 45 + .../en/docs/Quickstart/upgrade-parameters.md | 37 + .../Quickstart/user-and-permission-audit.md | 73 + .../docs/Quickstart/using-csv-log-output.md | 197 + content/en/docs/Quickstart/using-opengauss.md | 17 + .../using-the-gsql-client-for-connection.md | 9 + .../Quickstart/verifying-the-installation.md | 5 + .../version-and-platform-compatibility.md | 7 + content/en/docs/Quickstart/viewing-objects.md | 49 + .../Quickstart/viewing-parameter-values.md | 70 + content/en/docs/Quickstart/wait-events.md | 15 + ...rust-between-nodes-in-opengauss-is-lost.md | 222 + content/en/docs/Quickstart/write-ahead-log.md | 11 + .../en/docs/Quickstart/zone-and-formatting.md | 178 + content/en/docs/Releasenotes/Releasenotes.md | 3 + content/en/docs/Releasenotes/Terms of Use.md | 14 + .../en/docs/Releasenotes/acknowledgement.md | 4 + .../common-vulnerabilities-and-exposures.md | 4 + content/en/docs/Releasenotes/contribution.md | 24 + .../docs/Releasenotes/feature-introduction.md | 32 + .../en/docs/Releasenotes/important-notes.md | 8 + content/en/docs/Releasenotes/known-issues.md | 13 + content/en/docs/Releasenotes/new-features.md | 4 + .../docs/Releasenotes/optimized-features.md | 4 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes .../en/docs/Releasenotes/resolved-issues.md | 4 + content/en/docs/Releasenotes/source-code.md | 11 + content/en/docs/Releasenotes/user-notice.md | 6 + .../docs/Releasenotes/version-introduction.md | 9 + .../Technicalwhitepaper.md | 3 + .../Technicalwhitepaper/access-control.md | 10 + .../adaptive-compression.md | 79 + .../application-scenario.md | 11 + ...ons-oriented-to-application-development.md | 17 + .../docs/Technicalwhitepaper/cbo-optimizer.md | 6 + .../Technicalwhitepaper/common-concepts.md | 21 + .../copy-interface-for-error-tolerance.md | 4 + .../core-database-technologies.md | 15 + .../Technicalwhitepaper/database-audit.md | 10 + .../database-encryption-authentication.md | 10 + .../Technicalwhitepaper/database-security.md | 15 + .../Technicalwhitepaper/deployment-modes.md | 101 + .../deployment-solutions.md | 11 + ...-with-one-primary-and-multiple-standbys.md | 14 + .../figures/column-store.png | Bin 0 -> 96451 bytes ...atabase-management-and-storage-network.png | Bin 0 -> 34995 bytes .../figures/en-us_image_0253069486.png | Bin 0 -> 12588 bytes .../figures/en-us_image_0253141769.png | Bin 0 -> 25211 bytes 
...kunpeng-numa-architecture-optimization.png | Bin 0 -> 33621 bytes ...rimary-and-multiple-standby-deployment.png | Bin 0 -> 13580 bytes .../figures/opengauss-logical-components.png | Bin 0 -> 14080 bytes .../figures/primary-standby-deployment.png | Bin 0 -> 12085 bytes .../en/docs/Technicalwhitepaper/glossary.md | 641 ++ content/en/docs/Technicalwhitepaper/ha.md | 9 + .../hardware-requirements.md | 43 + .../high-concurrency-of-the-thread-pool.md | 8 + .../Technicalwhitepaper/high-performance.md | 15 + .../Technicalwhitepaper/high-scalability.md | 5 + .../hybrid-row-column-storage.md | 47 + .../introduction-to-deployment-solutions.md | 11 + .../kunpeng-numa-architecture-optimization.md | 9 + .../Technicalwhitepaper/logical-backup.md | 4 + .../Technicalwhitepaper/maintainability.md | 9 + .../network-communication-security.md | 88 + ...-click-diagnosis-information-collection.md | 16 + .../docs/Technicalwhitepaper/partitioning.md | 19 + .../pg-interface-compatibility.md | 4 + .../Technicalwhitepaper/physical-backup.md | 6 + .../primary-standby-deployment.md | 7 + .../Technicalwhitepaper/primary-standby.md | 8 + .../product-positioning.md | 8 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes .../row-level-access-control.md | 8 + ...ation-of-control-and-access-permissions.md | 8 + .../Technicalwhitepaper/slow-sql-diagnosis.md | 14 + .../software-and-hardware-requirements.md | 7 + .../software-architecture.md | 42 + .../software-requirements.md | 35 + .../en/docs/Technicalwhitepaper/sql-bypass.md | 4 + .../en/docs/Technicalwhitepaper/sql-hints.md | 6 + .../standalone-deployment.md | 9 + .../standard-development-interfaces.md | 6 + .../docs/Technicalwhitepaper/standard-sql.md | 8 + ...ort-for-functions-and-stored-procedures.md | 16 + .../technical-characteristics.md | 20 + .../technical-specifications.md | 97 + .../transaction-support.md | 36 + .../Technicalwhitepaper/typical-networking.md | 45 + .../workload-diagnosis-report.md | 125 + .../en/docs/Toolreference/Toolreference.md | 3 + ...n-the-gphome-when-a-command-is-executed.md | 28 + content/en/docs/Toolreference/client-tool.md | 7 + .../docs/Toolreference/command-reference-1.md | 240 + .../docs/Toolreference/command-reference.md | 343 + content/en/docs/Toolreference/faqs-2.md | 7 + content/en/docs/Toolreference/faqs.md | 255 + content/en/docs/Toolreference/gaussdb.md | 475 ++ content/en/docs/Toolreference/gs_backup.md | 141 + .../en/docs/Toolreference/gs_basebackup.md | 125 + content/en/docs/Toolreference/gs_check.md | 1445 ++++ content/en/docs/Toolreference/gs_checkos.md | 655 ++ content/en/docs/Toolreference/gs_checkperf.md | 363 + content/en/docs/Toolreference/gs_collector.md | 308 + content/en/docs/Toolreference/gs_ctl.md | 400 ++ content/en/docs/Toolreference/gs_dump.md | 561 ++ content/en/docs/Toolreference/gs_dumpall.md | 253 + content/en/docs/Toolreference/gs_guc.md | 320 + content/en/docs/Toolreference/gs_initdb.md | 9 + content/en/docs/Toolreference/gs_install.md | 129 + content/en/docs/Toolreference/gs_om.md | 542 ++ .../en/docs/Toolreference/gs_postuninstall.md | 119 + .../en/docs/Toolreference/gs_preinstall.md | 363 + content/en/docs/Toolreference/gs_restore.md | 387 + content/en/docs/Toolreference/gs_ssh.md | 67 
+ content/en/docs/Toolreference/gs_sshexkey.md | 152 + content/en/docs/Toolreference/gs_uninstall.md | 76 + .../en/docs/Toolreference/gs_upgradectl.md | 228 + content/en/docs/Toolreference/gsql.md | 17 + content/en/docs/Toolreference/gstrace.md | 147 + content/en/docs/Toolreference/kadmin-local.md | 10 + content/en/docs/Toolreference/kdb5_util.md | 10 + content/en/docs/Toolreference/kdestroy.md | 10 + content/en/docs/Toolreference/kinit.md | 10 + content/en/docs/Toolreference/klist.md | 10 + content/en/docs/Toolreference/krb5kdc.md | 10 + .../Toolreference/meta-command-reference.md | 988 +++ .../obtaining-help-information.md | 120 + content/en/docs/Toolreference/overview-0.md | 19 + content/en/docs/Toolreference/overview.md | 444 ++ content/en/docs/Toolreference/pg_config.md | 136 + .../en/docs/Toolreference/pg_controldata.md | 47 + content/en/docs/Toolreference/pg_resetxlog.md | 65 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes ...ption-during-standby-instance-rebuildin.md | 47 + content/en/docs/Toolreference/server-tools.md | 25 + ...ogs-and-views-supported-by-gs_collector.md | 237 + .../en/docs/Toolreference/tool-overview.md | 77 + .../tools-used-in-the-internal-system.md | 47 + content/en/docs/Toolreference/usage-guide.md | 56 + .../en/docs/Toolreference/usage-guidelines.md | 186 + ...l)-setting-the-standby-node-to-readable.md | 10 + .../checking-the-health-status.md | 61 + ...n-file-for-primary-secondary-deployment.md | 47 + ...ion-file-for-single-instance-deployment.md | 37 + .../installation/configuring-os-parameters.md | 503 ++ ...uring-primary-database-node-information.md | 89 + .../configuring-the-basic-host-information.md | 72 + ...uring-the-database-name-and-directories.md | 106 + ...onfiguring-the-locale-and-character-set.md | 119 + .../creating-a-configuration-file.md | 15 + ...onfiguring-the-installation-environment.md | 361 + .../deleting-cluster-configurations.md | 92 + .../installation/disabling-the-os-firewall.md | 97 + .../installation/disabling-the-swap-memory.md | 8 + .../establishing-mutual-trust-manually.md | 310 + content/en/docs/installation/examples.md | 7 + .../installation/executing-installation.md | 212 + .../installation/executing-uninstallation.md | 54 + .../figures/installation-process.png | Bin 0 -> 137979 bytes .../installation/initial-configuration.md | 7 + ...itializing-the-installation-environment.md | 11 + .../installation/installation-overview.md | 9 + .../docs/installation/installation-process.md | 56 + .../installation-user-and-user-group.md | 37 + content/en/docs/installation/installation.md | 10 +- .../installation/installing-the-opengauss.md | 11 + .../modifying-os-configuration.md | 13 + ...g-and-verifying-an-installation-package.md | 55 + .../preparing-for-installation.md | 13 + ...e-and-hardware-installation-environment.md | 11 + .../public_sys-resources/icon-caution.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 580 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 394 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 406 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 253 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 580 bytes 
.../setting-character-set-parameters.md | 8 + .../setting-remote-login-of-user-root.md | 60 + .../docs/installation/setting-the-nic-mtu.md | 8 + .../setting-the-time-zone-and-time.md | 20 + .../software-and-hardware-requirements.md | 136 + .../uninstalling-the-opengauss.md | 9 + .../verifying-the-installation.md | 5 + content/en/menu/index.md | 1379 +++- .../docs/Administratorguide/gs_basebackup.md | 2 +- content/zh/docs/Administratorguide/gs_dump.md | 10 +- .../zh/docs/Administratorguide/gs_dumpall.md | 2 +- .../zh/docs/Administratorguide/gs_restore.md | 12 +- ...\345\270\270\345\244\204\347\220\206-1.md" | 2 +- .../build-sh\344\273\213\347\273\215.md" | 11 +- .../figures/\347\273\230\345\233\2761.png" | Bin 30588 -> 31407 bytes ...05\345\214\205\347\274\226\350\257\221.md" | 5 +- ...43\347\240\201\344\270\213\350\275\275.md" | 7 +- ...e-executables-\346\212\245\351\224\231.md" | 2 +- ...26\350\257\221\346\236\204\345\273\272.md" | 46 +- ...10\346\234\254\347\274\226\350\257\221.md" | 2 +- ...11\350\243\205\347\274\226\350\257\221.md" | 3 +- ...30\345\214\226\346\214\207\345\257\274.md" | 2 + .../GIN\347\264\242\345\274\225.md" | 2 +- ...02\346\225\260\350\257\264\346\230\216.md" | 4 +- ...14\347\216\257\345\242\203\347\261\273.md" | 2 +- .../MOT\351\231\220\345\210\266.md" | 103 + content/zh/docs/Developerguide/Query-24.md | 74 + ...30\345\214\226\346\214\207\345\257\274.md" | 2 + ...5\345\255\230\345\274\225\346\223\216.png" | Bin 0 -> 20637 bytes .../\344\273\213\347\273\215-19.md" | 10 + ...74\345\205\245\346\225\260\346\215\256.md" | 2 +- .../\345\206\205\345\255\230-23.md" | 279 + .../\345\206\205\345\255\230\350\241\250.md" | 52 +- ...77\347\224\250\346\214\207\345\257\274.md" | 17 + ...70\345\236\213\345\272\224\347\224\250.md" | 9 + ...00\346\234\257\344\273\213\347\273\215.md" | 30 + ...30\350\241\250\347\211\271\346\200\247.md" | 15 + ...50\345\222\214\347\264\242\345\274\225.md" | 44 + ...345\202\250\350\277\207\347\250\213-20.md" | 10 + ...30\345\202\250\350\277\207\347\250\213.md" | 6 +- ...351\242\230\345\244\204\347\220\206-17.md" | 19 + ...60\345\200\274\347\261\273\345\236\213.md" | 2 +- ...02\346\225\260\350\260\203\344\274\230.md" | 2 +- ...346\215\256\347\261\273\345\236\213-21.md" | 4 + .../\346\243\200\346\237\245\347\202\271.md" | 4 +- .../\346\246\202\350\277\260-16.md" | 48 +- .../\346\246\202\350\277\260-18.md" | 47 + ...73\345\236\213\350\275\254\346\215\242.md" | 2 +- .../\350\260\203\350\257\225-22.md" | 122 + ...37\350\275\275\347\256\241\347\220\206.md" | 2 +- ...04\346\272\220\346\266\210\350\200\227.md" | 2 +- ...36\346\216\245\350\256\276\347\275\256.md" | 16 + ...41\345\272\223\345\217\202\346\225\260.md" | 14 + .../\351\203\250\347\275\262.md" | 4 + ...15\347\275\256\345\207\206\345\244\207.md" | 12 + ...02\346\225\260\350\257\264\346\230\216.md" | 4 - ...16\351\251\261\345\212\250\347\261\273.md" | 2 +- ...4\347\275\221\347\244\272\344\276\213.png" | Bin 31137 -> 47731 bytes ...70\345\236\213\347\273\204\347\275\221.md" | 4 +- .../\345\206\205\345\255\230\350\241\250.md" | 67 + ...350\241\214\345\215\225\346\235\241SQL.md" | 2 +- ...47\350\241\214\345\256\211\350\243\205.md" | 15 +- ...71\351\207\217\346\226\207\344\273\266.md" | 2 +- ...14\345\256\211\350\243\205\345\214\205.md" | 2 +- ...37\350\275\275\347\256\241\347\220\206.md" | 2 +- ...57\345\242\203\350\246\201\346\261\202.md" | 2 +- ...36\346\216\245\350\256\276\347\275\256.md" | 16 + ...41\345\272\223\345\217\202\346\225\260.md" | 14 + ...72\346\234\254\344\277\241\346\201\257.md" | 8 +- 
...04\351\241\271\347\233\256\345\275\225.md" | 1 + content/zh/docs/Releasenotes/Releasenotes.md | 5 +- ...02\344\270\216\350\264\241\347\214\256.md" | 2 +- .../\346\272\220\344\273\243\347\240\201.md" | 8 +- .../Technicalwhitepaper.md | 2 +- .../zh/docs/Toolreference/gs_basebackup.md | 2 +- content/zh/docs/Toolreference/gs_collector.md | 96 +- ...42\345\244\215\346\226\271\346\263\225.md" | 2 +- content/zh/docs/Toolreference/gs_ctl.md | 1 + content/zh/docs/Toolreference/gs_dump.md | 10 +- content/zh/docs/Toolreference/gs_dumpall.md | 2 +- .../zh/docs/Toolreference/gs_postuninstall.md | 6 +- content/zh/docs/Toolreference/gs_restore.md | 12 +- .../gsql\346\246\202\350\277\260.md" | 2 +- ...\344\273\244\345\217\202\350\200\203-1.md" | 76 +- ...75\344\273\244\345\217\202\350\200\203.md" | 40 +- ...47\350\241\214\345\256\211\350\243\205.md" | 15 +- ...14\345\256\211\350\243\205\345\214\205.md" | 2 +- ...57\345\242\203\350\246\201\346\261\202.md" | 2 +- ...72\346\234\254\344\277\241\346\201\257.md" | 8 +- ...04\351\241\271\347\233\256\345\275\225.md" | 1 + content/zh/menu/index.md | 17 +- 1735 files changed, 148126 insertions(+), 317 deletions(-) create mode 100644 content/en/docs.lnk create mode 100644 content/en/docs/Administratorguide/Administratorguide.md create mode 100644 content/en/docs/Administratorguide/audit-logs.md create mode 100644 content/en/docs/Administratorguide/backup-and-restoration.md create mode 100644 content/en/docs/Administratorguide/check-method-0.md create mode 100644 content/en/docs/Administratorguide/check-method-2.md create mode 100644 content/en/docs/Administratorguide/check-method.md create mode 100644 content/en/docs/Administratorguide/checking-and-deleting-logs.md create mode 100644 content/en/docs/Administratorguide/checking-database-performance.md create mode 100644 content/en/docs/Administratorguide/checking-opengauss-health-status.md create mode 100644 content/en/docs/Administratorguide/checking-opengauss-run-logs.md create mode 100644 content/en/docs/Administratorguide/checking-os-logs.md create mode 100644 content/en/docs/Administratorguide/checking-os-parameters.md create mode 100644 content/en/docs/Administratorguide/checking-the-number-of-application-connections.md create mode 100644 content/en/docs/Administratorguide/checking-time-consistency.md create mode 100644 content/en/docs/Administratorguide/cleaning-run-logs.md create mode 100644 content/en/docs/Administratorguide/data-security-maintenance-suggestions.md create mode 100644 content/en/docs/Administratorguide/exception-handling-1.md create mode 100644 content/en/docs/Administratorguide/exception-handling-3.md create mode 100644 content/en/docs/Administratorguide/exception-handling.md create mode 100644 content/en/docs/Administratorguide/generating-configuration-files.md create mode 100644 content/en/docs/Administratorguide/gs_basebackup.md create mode 100644 content/en/docs/Administratorguide/gs_dump.md create mode 100644 content/en/docs/Administratorguide/gs_dumpall.md create mode 100644 content/en/docs/Administratorguide/gs_restore.md create mode 100644 content/en/docs/Administratorguide/log-overview.md create mode 100644 content/en/docs/Administratorguide/log-reference.md create mode 100644 content/en/docs/Administratorguide/logical-backup-and-restoration.md create mode 100644 content/en/docs/Administratorguide/operation-logs.md create mode 100644 content/en/docs/Administratorguide/overview.md create mode 100644 content/en/docs/Administratorguide/performance-logs.md create mode 100644 
content/en/docs/Administratorguide/physical-backup-and-restoration.md create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Administratorguide/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Administratorguide/querying-status.md create mode 100644 content/en/docs/Administratorguide/risky-operations.md create mode 100644 content/en/docs/Administratorguide/routine-maintenance-check-items.md create mode 100644 content/en/docs/Administratorguide/routine-maintenance.md create mode 100644 content/en/docs/Administratorguide/routinely-maintaining-tables.md create mode 100644 content/en/docs/Administratorguide/routinely-recreating-an-index.md create mode 100644 content/en/docs/Administratorguide/starting-and-stopping-opengauss.md create mode 100644 content/en/docs/Administratorguide/system-logs.md create mode 100644 content/en/docs/Administratorguide/wals.md create mode 100644 content/en/docs/Compilationguide/Compilation.md create mode 100644 content/en/docs/Compilationguide/compiling-open-source-software.md create mode 100644 content/en/docs/Compilationguide/compiling-the-installation-package.md create mode 100644 content/en/docs/Compilationguide/compiling-the-version.md create mode 100644 content/en/docs/Compilationguide/configuring-environment-variables.md create mode 100644 content/en/docs/Compilationguide/downloading-code.md create mode 100644 content/en/docs/Compilationguide/faqs.md create mode 100644 "content/en/docs/Compilationguide/figures/\347\273\230\345\233\2761.png" create mode 100644 content/en/docs/Compilationguide/hardware-requirements.md create mode 100644 content/en/docs/Compilationguide/how-do-i-delete-temporary-files-generated-during-compilation.md create mode 100644 content/en/docs/Compilationguide/how-do-i-handle-the-g++-fatal-error-killed-signal-terminated-program-cclplus-error.md create mode 100644 content/en/docs/Compilationguide/how-do-i-handle-the-out-of-memory-allocating-xxx-bytes-after-a-total-of-xxx-bytes-error.md create mode 100644 content/en/docs/Compilationguide/how-do-i-resolve-the-configure-error-c-compiler-cannot-create-executables-error.md create mode 100644 content/en/docs/Compilationguide/introduction-to-build-sh.md create mode 100644 content/en/docs/Compilationguide/introduction.md create mode 100644 content/en/docs/Compilationguide/os-requirements.md create mode 100644 content/en/docs/Compilationguide/overview.md create mode 100644 content/en/docs/Compilationguide/preparation-before-compiling.md create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Compilationguide/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Compilationguide/purpose.md create mode 100644 
content/en/docs/Compilationguide/setting-up-the-compilation-environment.md create mode 100644 content/en/docs/Compilationguide/software-compilation-and-installation.md create mode 100644 content/en/docs/Compilationguide/software-dependency-requirements.md create mode 100644 content/en/docs/Compilationguide/software-requirements.md create mode 100644 content/en/docs/Description/Description.md create mode 100644 content/en/docs/Description/application-scenarios.md create mode 100644 content/en/docs/Description/basic-features.md create mode 100644 content/en/docs/Description/data-partitioning.md create mode 100644 content/en/docs/Description/enhanced-features.md create mode 100644 content/en/docs/Description/figures/opengauss-logical-components.png create mode 100644 "content/en/docs/Description/figures/opengauss\350\241\214\345\210\227\346\267\267\345\255\230\345\274\225\346\223\216.png" create mode 100644 "content/en/docs/Description/figures/\345\220\221\351\207\217\345\214\226\346\211\247\350\241\214\345\274\225\346\223\216(png).png" create mode 100644 content/en/docs/Description/ha-transaction-processing.md create mode 100644 content/en/docs/Description/high-concurrency-and-high-performance.md create mode 100644 content/en/docs/Description/memory-table.md create mode 100644 content/en/docs/Description/operating-environment.md create mode 100644 content/en/docs/Description/primary-standby.md create mode 100644 content/en/docs/Description/product-positioning.md create mode 100644 content/en/docs/Description/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Description/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Description/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Description/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Description/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Description/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Description/sql-self-diagnosis.md create mode 100644 content/en/docs/Description/system-architecture.md create mode 100644 content/en/docs/Description/technical-specifications.md create mode 100644 content/en/docs/Description/vectorized-executor-and-hybrid-row-column-storage-engine.md create mode 100644 content/en/docs/Developerguide/Developerguide.md create mode 100644 content/en/docs/Developerguide/abort.md create mode 100644 content/en/docs/Developerguide/additional-features.md create mode 100644 content/en/docs/Developerguide/administration.md create mode 100644 content/en/docs/Developerguide/administrators.md create mode 100644 content/en/docs/Developerguide/advisory-lock-functions.md create mode 100644 content/en/docs/Developerguide/aggregate-functions.md create mode 100644 content/en/docs/Developerguide/ai-features.md create mode 100644 content/en/docs/Developerguide/alarm-detection.md create mode 100644 content/en/docs/Developerguide/alter-data-source.md create mode 100644 content/en/docs/Developerguide/alter-database.md create mode 100644 content/en/docs/Developerguide/alter-default-privileges.md create mode 100644 content/en/docs/Developerguide/alter-directory.md create mode 100644 content/en/docs/Developerguide/alter-function.md create mode 100644 content/en/docs/Developerguide/alter-group.md create mode 100644 content/en/docs/Developerguide/alter-index.md create mode 100644 content/en/docs/Developerguide/alter-large-object.md create mode 100644 content/en/docs/Developerguide/alter-role.md create 
mode 100644 content/en/docs/Developerguide/alter-row-level-security-policy.md create mode 100644 content/en/docs/Developerguide/alter-schema.md create mode 100644 content/en/docs/Developerguide/alter-sequence.md create mode 100644 content/en/docs/Developerguide/alter-session.md create mode 100644 content/en/docs/Developerguide/alter-synonym.md create mode 100644 content/en/docs/Developerguide/alter-system-kill-session.md create mode 100644 content/en/docs/Developerguide/alter-table-partition.md create mode 100644 content/en/docs/Developerguide/alter-table.md create mode 100644 content/en/docs/Developerguide/alter-tablespace.md create mode 100644 content/en/docs/Developerguide/alter-text-search-configuration.md create mode 100644 content/en/docs/Developerguide/alter-text-search-dictionary.md create mode 100644 content/en/docs/Developerguide/alter-trigger.md create mode 100644 content/en/docs/Developerguide/alter-type.md create mode 100644 content/en/docs/Developerguide/alter-user.md create mode 100644 content/en/docs/Developerguide/alter-view.md create mode 100644 content/en/docs/Developerguide/analyze-analyse.md create mode 100644 content/en/docs/Developerguide/analyze-table.md create mode 100644 content/en/docs/Developerguide/analyzing-hardware-bottlenecks.md create mode 100644 content/en/docs/Developerguide/anonymous-blocks.md create mode 100644 content/en/docs/Developerguide/api-reference.md create mode 100644 content/en/docs/Developerguide/apis.md create mode 100644 content/en/docs/Developerguide/appendix.md create mode 100644 content/en/docs/Developerguide/application-development-guide.md create mode 100644 content/en/docs/Developerguide/archiving.md create mode 100644 content/en/docs/Developerguide/array-expressions.md create mode 100644 content/en/docs/Developerguide/array-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/arrays-and-records.md create mode 100644 content/en/docs/Developerguide/arrays.md create mode 100644 content/en/docs/Developerguide/assignment-statements.md create mode 100644 content/en/docs/Developerguide/asynchronous-i-o-operations.md create mode 100644 content/en/docs/Developerguide/audit-switch.md create mode 100644 content/en/docs/Developerguide/auditing.md create mode 100644 content/en/docs/Developerguide/automatic-vacuuming.md create mode 100644 content/en/docs/Developerguide/background-writer.md create mode 100644 content/en/docs/Developerguide/backup-and-restoration-control-functions.md create mode 100644 content/en/docs/Developerguide/basic-statements.md create mode 100644 content/en/docs/Developerguide/basic-structure.md create mode 100644 content/en/docs/Developerguide/basic-text-matching.md create mode 100644 content/en/docs/Developerguide/before-you-start.md create mode 100644 content/en/docs/Developerguide/begin.md create mode 100644 content/en/docs/Developerguide/benchmarksql-an-open-source-tpc-c-tool.md create mode 100644 content/en/docs/Developerguide/best-practices.md create mode 100644 content/en/docs/Developerguide/bgwriter_stat.md create mode 100644 content/en/docs/Developerguide/binary-data-types.md create mode 100644 content/en/docs/Developerguide/binary-string-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/bios-16.md create mode 100644 content/en/docs/Developerguide/bios.md create mode 100644 content/en/docs/Developerguide/bit-string-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/bit-string-types.md create mode 100644 
content/en/docs/Developerguide/boolean-data-types.md create mode 100644 content/en/docs/Developerguide/branch-statements.md create mode 100644 content/en/docs/Developerguide/cache-io.md create mode 100644 content/en/docs/Developerguide/call-statement.md create mode 100644 content/en/docs/Developerguide/call.md create mode 100644 content/en/docs/Developerguide/character-data-types.md create mode 100644 content/en/docs/Developerguide/character-processing-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/checking-blocked-statements.md create mode 100644 content/en/docs/Developerguide/checking-the-number-of-database-connections.md create mode 100644 content/en/docs/Developerguide/checkpoint-22.md create mode 100644 content/en/docs/Developerguide/checkpoint-32.md create mode 100644 content/en/docs/Developerguide/checkpoint.md create mode 100644 content/en/docs/Developerguide/checkpoints-41.md create mode 100644 content/en/docs/Developerguide/checkpoints.md create mode 100644 content/en/docs/Developerguide/class_vital_info.md create mode 100644 content/en/docs/Developerguide/client-access-authentication.md create mode 100644 content/en/docs/Developerguide/close.md create mode 100644 content/en/docs/Developerguide/closing-a-connection.md create mode 100644 content/en/docs/Developerguide/cluster.md create mode 100644 content/en/docs/Developerguide/command-reference-13.md create mode 100644 content/en/docs/Developerguide/command-reference.md create mode 100644 content/en/docs/Developerguide/comment.md create mode 100644 content/en/docs/Developerguide/commissioning.md create mode 100644 content/en/docs/Developerguide/commit-end.md create mode 100644 content/en/docs/Developerguide/commit-prepared.md create mode 100644 content/en/docs/Developerguide/common-faults-and-identification.md create mode 100644 content/en/docs/Developerguide/communication-library-parameters.md create mode 100644 content/en/docs/Developerguide/comparison-disk-vs-mot.md create mode 100644 content/en/docs/Developerguide/comparison-operators.md create mode 100644 content/en/docs/Developerguide/compatibility-with-earlier-versions.md create mode 100644 content/en/docs/Developerguide/competitive-overview.md create mode 100644 content/en/docs/Developerguide/concepts.md create mode 100644 content/en/docs/Developerguide/concurrency-control-mechanism.md create mode 100644 content/en/docs/Developerguide/concurrent-data-import-and-queries.md create mode 100644 content/en/docs/Developerguide/concurrent-insert-and-delete-in-the-same-table.md create mode 100644 content/en/docs/Developerguide/concurrent-insert-in-the-same-table.md create mode 100644 content/en/docs/Developerguide/concurrent-update-in-the-same-table.md create mode 100644 content/en/docs/Developerguide/concurrent-write-examples.md create mode 100644 content/en/docs/Developerguide/condition-expressions.md create mode 100644 content/en/docs/Developerguide/conditional-expression-functions.md create mode 100644 content/en/docs/Developerguide/conditional-statements.md create mode 100644 content/en/docs/Developerguide/config_settings.md create mode 100644 content/en/docs/Developerguide/configuration-examples.md create mode 100644 content/en/docs/Developerguide/configuration-file-reference.md create mode 100644 content/en/docs/Developerguide/configuration-settings-functions.md create mode 100644 content/en/docs/Developerguide/configuration.md create mode 100644 content/en/docs/Developerguide/configurations.md create mode 100644 
content/en/docs/Developerguide/configuring-a-data-source-in-the-linux-os.md create mode 100644 content/en/docs/Developerguide/configuring-a-remote-connection.md create mode 100644 content/en/docs/Developerguide/configuring-client-access-authentication.md create mode 100644 content/en/docs/Developerguide/configuring-database-audit.md create mode 100644 content/en/docs/Developerguide/configuring-file-permission-security-policies.md create mode 100644 content/en/docs/Developerguide/configuring-llvm.md create mode 100644 content/en/docs/Developerguide/configuring-running-parameters.md create mode 100644 content/en/docs/Developerguide/confirming-connection-information.md create mode 100644 content/en/docs/Developerguide/connecting-to-a-database-0.md create mode 100644 content/en/docs/Developerguide/connecting-to-a-database.md create mode 100644 content/en/docs/Developerguide/connecting-to-the-database-(using-ssl).md create mode 100644 content/en/docs/Developerguide/connection-and-authentication.md create mode 100644 content/en/docs/Developerguide/connection-characters.md create mode 100644 content/en/docs/Developerguide/connection-pool-parameters.md create mode 100644 content/en/docs/Developerguide/connection-settings.md create mode 100644 content/en/docs/Developerguide/constant-and-macro.md create mode 100644 content/en/docs/Developerguide/constraints-on-index-use.md create mode 100644 content/en/docs/Developerguide/control-statements.md create mode 100644 content/en/docs/Developerguide/controlling-text-search.md create mode 100644 content/en/docs/Developerguide/controlling-transactions.md create mode 100644 content/en/docs/Developerguide/conversion-example.md create mode 100644 content/en/docs/Developerguide/converting-a-disk-table-into-a-mot-table.md create mode 100644 content/en/docs/Developerguide/converting.md create mode 100644 content/en/docs/Developerguide/copy.md create mode 100644 content/en/docs/Developerguide/copymanager.md create mode 100644 content/en/docs/Developerguide/core-dump-occurs-due-to-full-disk-space.md create mode 100644 content/en/docs/Developerguide/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log_directory.md create mode 100644 content/en/docs/Developerguide/core-fault-locating.md create mode 100644 content/en/docs/Developerguide/cost-based-vacuum-delay.md create mode 100644 content/en/docs/Developerguide/cpu.md create mode 100644 content/en/docs/Developerguide/create-data-source.md create mode 100644 content/en/docs/Developerguide/create-database.md create mode 100644 content/en/docs/Developerguide/create-directory.md create mode 100644 content/en/docs/Developerguide/create-function.md create mode 100644 content/en/docs/Developerguide/create-group.md create mode 100644 content/en/docs/Developerguide/create-index.md create mode 100644 content/en/docs/Developerguide/create-procedure.md create mode 100644 content/en/docs/Developerguide/create-role.md create mode 100644 content/en/docs/Developerguide/create-row-level-security-policy.md create mode 100644 content/en/docs/Developerguide/create-schema.md create mode 100644 content/en/docs/Developerguide/create-sequence.md create mode 100644 content/en/docs/Developerguide/create-synonym.md create mode 100644 content/en/docs/Developerguide/create-table-as.md create mode 100644 content/en/docs/Developerguide/create-table-partition.md create mode 100644 content/en/docs/Developerguide/create-table.md create mode 100644 content/en/docs/Developerguide/create-tablespace.md create mode 100644 
content/en/docs/Developerguide/create-text-search-configuration.md create mode 100644 content/en/docs/Developerguide/create-text-search-dictionary.md create mode 100644 content/en/docs/Developerguide/create-trigger.md create mode 100644 content/en/docs/Developerguide/create-type.md create mode 100644 content/en/docs/Developerguide/create-user.md create mode 100644 content/en/docs/Developerguide/create-view.md create mode 100644 content/en/docs/Developerguide/creating-an-index-30.md create mode 100644 content/en/docs/Developerguide/creating-an-index.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-databases.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-indexes.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-partitioned-tables.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-schemas.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-sequences.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-tables.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-tablespaces.md create mode 100644 content/en/docs/Developerguide/creating-and-managing-views.md create mode 100644 content/en/docs/Developerguide/creating-dropping-a-mot-table.md create mode 100644 content/en/docs/Developerguide/creating-tables.md create mode 100644 content/en/docs/Developerguide/cursor-loop.md create mode 100644 content/en/docs/Developerguide/cursor-operations.md create mode 100644 content/en/docs/Developerguide/cursor.md create mode 100644 content/en/docs/Developerguide/cursors.md create mode 100644 content/en/docs/Developerguide/data-export-by-a-user-without-required-permissions.md create mode 100644 content/en/docs/Developerguide/data-import-using-copy-from-stdin.md create mode 100644 content/en/docs/Developerguide/data-type-conversion.md create mode 100644 content/en/docs/Developerguide/data-types-22.md create mode 100644 content/en/docs/Developerguide/data-types-36.md create mode 100644 content/en/docs/Developerguide/data-types-supported-by-column-store-tables.md create mode 100644 content/en/docs/Developerguide/data-types.md create mode 100644 content/en/docs/Developerguide/database-connection-control-functions.md create mode 100644 content/en/docs/Developerguide/database-logical-architecture.md create mode 100644 content/en/docs/Developerguide/database-object-functions.md create mode 100644 content/en/docs/Developerguide/database-security-management.md create mode 100644 content/en/docs/Developerguide/database-statement-execution-functions.md create mode 100644 content/en/docs/Developerguide/date-and-time-processing-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/date-time-types.md create mode 100644 content/en/docs/Developerguide/dbe_perf-schema.md create mode 100644 content/en/docs/Developerguide/dcl-syntax-overview.md create mode 100644 content/en/docs/Developerguide/ddl-syntax-overview.md create mode 100644 content/en/docs/Developerguide/deallocate.md create mode 100644 content/en/docs/Developerguide/debugging.md create mode 100644 content/en/docs/Developerguide/declare-syntax.md create mode 100644 content/en/docs/Developerguide/declare.md create mode 100644 content/en/docs/Developerguide/deep-copy.md create mode 100644 content/en/docs/Developerguide/default-mot-conf.md create mode 100644 content/en/docs/Developerguide/default-permission-mechanism.md create mode 100644 
content/en/docs/Developerguide/default-settings-of-client-connection.md create mode 100644 content/en/docs/Developerguide/define-variable.md create mode 100644 content/en/docs/Developerguide/delete.md create mode 100644 content/en/docs/Developerguide/deleting-data-from-a-table.md create mode 100644 content/en/docs/Developerguide/deployment.md create mode 100644 content/en/docs/Developerguide/description.md create mode 100644 content/en/docs/Developerguide/design-principles.md create mode 100644 content/en/docs/Developerguide/design.md create mode 100644 content/en/docs/Developerguide/determining-the-scope-of-performance-tuning.md create mode 100644 content/en/docs/Developerguide/developer-options.md create mode 100644 content/en/docs/Developerguide/development-based-on-jdbc.md create mode 100644 content/en/docs/Developerguide/development-based-on-libpq.md create mode 100644 content/en/docs/Developerguide/development-based-on-odbc.md create mode 100644 content/en/docs/Developerguide/development-process-1.md create mode 100644 content/en/docs/Developerguide/development-process.md create mode 100644 content/en/docs/Developerguide/development-specifications.md create mode 100644 content/en/docs/Developerguide/dictionaries.md create mode 100644 content/en/docs/Developerguide/disk-space.md create mode 100644 content/en/docs/Developerguide/disk-ssd.md create mode 100644 content/en/docs/Developerguide/dml-syntax-overview.md create mode 100644 content/en/docs/Developerguide/do.md create mode 100644 content/en/docs/Developerguide/doing-vacuum-to-a-table.md create mode 100644 content/en/docs/Developerguide/drop-data-source.md create mode 100644 content/en/docs/Developerguide/drop-database.md create mode 100644 content/en/docs/Developerguide/drop-directory.md create mode 100644 content/en/docs/Developerguide/drop-function.md create mode 100644 content/en/docs/Developerguide/drop-group.md create mode 100644 content/en/docs/Developerguide/drop-index.md create mode 100644 content/en/docs/Developerguide/drop-owned.md create mode 100644 content/en/docs/Developerguide/drop-procedure.md create mode 100644 content/en/docs/Developerguide/drop-role.md create mode 100644 content/en/docs/Developerguide/drop-row-level-security-policy.md create mode 100644 content/en/docs/Developerguide/drop-schema.md create mode 100644 content/en/docs/Developerguide/drop-sequence.md create mode 100644 content/en/docs/Developerguide/drop-synonym.md create mode 100644 content/en/docs/Developerguide/drop-table.md create mode 100644 content/en/docs/Developerguide/drop-tablespace.md create mode 100644 content/en/docs/Developerguide/drop-text-search-configuration.md create mode 100644 content/en/docs/Developerguide/drop-text-search-dictionary.md create mode 100644 content/en/docs/Developerguide/drop-trigger.md create mode 100644 content/en/docs/Developerguide/drop-type.md create mode 100644 content/en/docs/Developerguide/drop-user.md create mode 100644 content/en/docs/Developerguide/drop-view.md create mode 100644 content/en/docs/Developerguide/durability-20.md create mode 100644 content/en/docs/Developerguide/durability.md create mode 100644 content/en/docs/Developerguide/dynamic-statements.md create mode 100644 content/en/docs/Developerguide/dynamically-calling-anonymous-blocks.md create mode 100644 content/en/docs/Developerguide/dynamically-calling-stored-procedures.md create mode 100644 content/en/docs/Developerguide/environment-deployment.md create mode 100644 content/en/docs/Developerguide/error-log.md create mode 100644 
content/en/docs/Developerguide/error-reporting-and-logging.md create mode 100644 content/en/docs/Developerguide/error-trapping-statements.md create mode 100644 content/en/docs/Developerguide/errors-returned-to-the-user.md create mode 100644 content/en/docs/Developerguide/errors-written-the-log-file.md create mode 100644 content/en/docs/Developerguide/establishing-secure-tcp-ip-connections-in-ssh-tunnel-mode.md create mode 100644 content/en/docs/Developerguide/establishing-secure-tcp-ip-connections-in-ssl-mode.md create mode 100644 content/en/docs/Developerguide/example-1-importing-and-exporting-data-through-local-files.md create mode 100644 content/en/docs/Developerguide/example-2-migrating-data-from-a-my-database-to-opengauss.md create mode 100644 content/en/docs/Developerguide/example-2-migrating-data-from-a-mysql-database-to-the-opengauss-database.md create mode 100644 content/en/docs/Developerguide/example-3.md create mode 100644 content/en/docs/Developerguide/example-common-operations.md create mode 100644 content/en/docs/Developerguide/example-importing-and-exporting-data-through-local-files.md create mode 100644 content/en/docs/Developerguide/example-retrying-sql-queries-for-applications.md create mode 100644 content/en/docs/Developerguide/example.md create mode 100644 content/en/docs/Developerguide/examples.md create mode 100644 content/en/docs/Developerguide/exception-handling.md create mode 100644 content/en/docs/Developerguide/execute.md create mode 100644 content/en/docs/Developerguide/executing-dynamic-non-query-statements.md create mode 100644 content/en/docs/Developerguide/executing-dynamic-query-statements.md create mode 100644 content/en/docs/Developerguide/experience-in-rewriting-sql-statements.md create mode 100644 content/en/docs/Developerguide/explain-plan.md create mode 100644 content/en/docs/Developerguide/explain.md create mode 100644 content/en/docs/Developerguide/explicit-cursor.md create mode 100644 content/en/docs/Developerguide/exporting-a-database.md create mode 100644 content/en/docs/Developerguide/exporting-a-schema.md create mode 100644 content/en/docs/Developerguide/exporting-a-single-database.md create mode 100644 content/en/docs/Developerguide/exporting-a-table.md create mode 100644 content/en/docs/Developerguide/exporting-all-databases-6.md create mode 100644 content/en/docs/Developerguide/exporting-all-databases.md create mode 100644 content/en/docs/Developerguide/exporting-data.md create mode 100644 content/en/docs/Developerguide/exporting-global-objects.md create mode 100644 content/en/docs/Developerguide/expressions.md create mode 100644 content/en/docs/Developerguide/extended-fdw-and-other-gaussdb-features.md create mode 100644 content/en/docs/Developerguide/extended-functions.md create mode 100644 content/en/docs/Developerguide/extended-syntax.md create mode 100644 content/en/docs/Developerguide/external-support-tools-orange.md create mode 100644 content/en/docs/Developerguide/faqs.md create mode 100644 content/en/docs/Developerguide/fault-tolerance.md create mode 100644 content/en/docs/Developerguide/features-and-benefits.md create mode 100644 content/en/docs/Developerguide/fetch.md create mode 100644 "content/en/docs/Developerguide/figures/12-4-5-4-\347\256\227\345\255\220\347\272\247\350\260\203\344\274\230(\347\244\272\344\276\213\351\224\231\344\271\261\350\260\203\346\225\2641\357\274\211.png" create mode 100644 
"content/en/docs/Developerguide/figures/12-4-5-4-\347\256\227\345\255\220\347\272\247\350\260\203\344\274\230(\347\244\272\344\276\213\351\224\231\344\271\261\350\260\203\346\225\2642\357\274\211.png" create mode 100644 "content/en/docs/Developerguide/figures/12-4-5-4-\347\256\227\345\255\220\347\272\247\350\260\203\344\274\230(\347\244\272\344\276\213\351\224\231\344\271\261\350\260\203\346\225\264\347\244\272\344\276\2133-1\357\274\211.png" create mode 100644 "content/en/docs/Developerguide/figures/12-4-5-4-\347\256\227\345\255\220\347\272\247\350\260\203\344\274\230(\347\244\272\344\276\213\351\224\231\344\271\261\350\260\203\346\225\264\347\244\272\344\276\2133\357\274\211.png" create mode 100644 content/en/docs/Developerguide/figures/all.png create mode 100644 content/en/docs/Developerguide/figures/anonymous_block.png create mode 100644 content/en/docs/Developerguide/figures/any-some.png create mode 100644 content/en/docs/Developerguide/figures/application-development-process-based-on-jdbc.png create mode 100644 content/en/docs/Developerguide/figures/assignment_value.png create mode 100644 content/en/docs/Developerguide/figures/call_anonymous_block.png create mode 100644 content/en/docs/Developerguide/figures/call_clause.png create mode 100644 content/en/docs/Developerguide/figures/call_procedure.png create mode 100644 content/en/docs/Developerguide/figures/case.jpg create mode 100644 content/en/docs/Developerguide/figures/case_when.png create mode 100644 content/en/docs/Developerguide/figures/close_cursor.jpg create mode 100644 content/en/docs/Developerguide/figures/coalesce.png create mode 100644 content/en/docs/Developerguide/figures/cursor_typename.png create mode 100644 content/en/docs/Developerguide/figures/database-logical-architecture.png create mode 100644 content/en/docs/Developerguide/figures/declare_variable.png create mode 100644 content/en/docs/Developerguide/figures/decode.png create mode 100644 content/en/docs/Developerguide/figures/dynamic_cursor_define.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381460.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381461.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381462.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381463.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381464.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0242381725.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0243595915.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0244851037.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0246254080.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0246254081.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0246254082.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0252660975.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0252663634.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253028833.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253030479.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253032870.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253036670.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253037239.png create 
mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253038757.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253082069.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253403489.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253403490.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253404022.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0253404023.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713415.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713417.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713419.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713431.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713433.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713435.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713439.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713448.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713450.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713454.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257713456.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257806512.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257806513.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257839664.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257843947.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257843950.jpg create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854512.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854550.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854609.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854718.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854722.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854726.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854894.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854911.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257854947.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855009.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855024.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855073.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855157.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855235.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855271.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855327.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855330.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855378.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855379.png create mode 100644 
content/en/docs/Developerguide/figures/en-us_image_0257855432.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855450.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855460.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855485.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257855494.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257856189.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257856190.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257856191.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257856192.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257856193.png create mode 100644 content/en/docs/Developerguide/figures/en-us_image_0257860033.png create mode 100644 content/en/docs/Developerguide/figures/execute-immediate-dynamic_select_clause.png create mode 100644 content/en/docs/Developerguide/figures/execution-process-of-query-related-sql-statements-by-the-sql-engine.png create mode 100644 content/en/docs/Developerguide/figures/exists-not-exists.png create mode 100644 content/en/docs/Developerguide/figures/fetch_cursor.png create mode 100644 content/en/docs/Developerguide/figures/for_as_loop.png create mode 100644 content/en/docs/Developerguide/figures/for_loop.png create mode 100644 content/en/docs/Developerguide/figures/for_loop_query.png create mode 100644 content/en/docs/Developerguide/figures/forall.png create mode 100644 content/en/docs/Developerguide/figures/gaussdb-system-architecture.png create mode 100644 content/en/docs/Developerguide/figures/greatest.png create mode 100644 content/en/docs/Developerguide/figures/if_then.jpg create mode 100644 content/en/docs/Developerguide/figures/if_then_else.jpg create mode 100644 content/en/docs/Developerguide/figures/if_then_elsif_else.png create mode 100644 content/en/docs/Developerguide/figures/in-not-in.png create mode 100644 content/en/docs/Developerguide/figures/integrating-the-mot-engine.png create mode 100644 content/en/docs/Developerguide/figures/least.png create mode 100644 content/en/docs/Developerguide/figures/loop.png create mode 100644 content/en/docs/Developerguide/figures/memory-optimized-storage-engine-within-opengauss.png create mode 100644 content/en/docs/Developerguide/figures/mot-architecture.png create mode 100644 content/en/docs/Developerguide/figures/noselect.png create mode 100644 content/en/docs/Developerguide/figures/nullif.png create mode 100644 content/en/docs/Developerguide/figures/nvl.jpg create mode 100644 content/en/docs/Developerguide/figures/odbc-based-application-development-process.png create mode 100644 content/en/docs/Developerguide/figures/odbc-system-structure.png create mode 100644 content/en/docs/Developerguide/figures/open_dynamic_cursor.png create mode 100644 content/en/docs/Developerguide/figures/open_for.png create mode 100644 content/en/docs/Developerguide/figures/open_static_cursor.png create mode 100644 content/en/docs/Developerguide/figures/opengauss-performance-tuning.png create mode 100644 content/en/docs/Developerguide/figures/opengauss-service-response-process.jpg create mode 100644 content/en/docs/Developerguide/figures/private-(local)-memory-(for-each-transaction)-and-a-global-memory-(for-all-the-transactions-of-all-t.png create mode 100644 content/en/docs/Developerguide/figures/raise.png create mode 100644 
content/en/docs/Developerguide/figures/raise_condition.png create mode 100644 content/en/docs/Developerguide/figures/raise_format.png create mode 100644 content/en/docs/Developerguide/figures/raise_option.png create mode 100644 content/en/docs/Developerguide/figures/raise_sqlstate.png create mode 100644 content/en/docs/Developerguide/figures/return_clause.jpg create mode 100644 content/en/docs/Developerguide/figures/sql-execution-plan-example.png create mode 100644 content/en/docs/Developerguide/figures/static_cursor_define.jpg create mode 100644 content/en/docs/Developerguide/figures/syntax-of-the-record-type.png create mode 100644 content/en/docs/Developerguide/figures/url.png create mode 100644 content/en/docs/Developerguide/figures/using_clause-0.png create mode 100644 content/en/docs/Developerguide/figures/using_clause-1.png create mode 100644 content/en/docs/Developerguide/figures/using_clause-2.png create mode 100644 content/en/docs/Developerguide/figures/using_clause.png create mode 100644 content/en/docs/Developerguide/figures/when_clause.png create mode 100644 content/en/docs/Developerguide/figures/while_loop.png create mode 100644 content/en/docs/Developerguide/figures/zh-cn_image_0118861065.jpg create mode 100644 "content/en/docs/Developerguide/figures/\346\226\207\346\241\243.png" create mode 100644 content/en/docs/Developerguide/file-location.md create mode 100644 content/en/docs/Developerguide/file.md create mode 100644 content/en/docs/Developerguide/file_iostat.md create mode 100644 content/en/docs/Developerguide/file_redo_iostat.md create mode 100644 content/en/docs/Developerguide/full-text-retrieval.md create mode 100644 content/en/docs/Developerguide/full-text-search.md create mode 100644 content/en/docs/Developerguide/functions-and-operators.md create mode 100644 content/en/docs/Developerguide/functions-for-asynchronous-command-processing.md create mode 100644 content/en/docs/Developerguide/functions-for-canceling-queries-in-progress.md create mode 100644 content/en/docs/Developerguide/functions.md create mode 100644 content/en/docs/Developerguide/garbage-collection.md create mode 100644 content/en/docs/Developerguide/gathering-document-statistics.md create mode 100644 content/en/docs/Developerguide/general-guidelines.md create mode 100644 content/en/docs/Developerguide/generating-certificates.md create mode 100644 content/en/docs/Developerguide/genetic-query-optimizer.md create mode 100644 content/en/docs/Developerguide/geometric-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/geometric.md create mode 100644 content/en/docs/Developerguide/gin-indexes.md create mode 100644 content/en/docs/Developerguide/gin-tips-and-tricks.md create mode 100644 content/en/docs/Developerguide/global_bgwriter_stat.md create mode 100644 content/en/docs/Developerguide/global_ckpt_status.md create mode 100644 content/en/docs/Developerguide/global_config_settings.md create mode 100644 content/en/docs/Developerguide/global_double_write_status.md create mode 100644 content/en/docs/Developerguide/global_file_iostat.md create mode 100644 content/en/docs/Developerguide/global_file_redo_iostat.md create mode 100644 content/en/docs/Developerguide/global_instance_time.md create mode 100644 content/en/docs/Developerguide/global_locks.md create mode 100644 content/en/docs/Developerguide/global_memory_node_detail.md create mode 100644 content/en/docs/Developerguide/global_operator_history.md create mode 100644 content/en/docs/Developerguide/global_operator_history_table.md 
create mode 100644 content/en/docs/Developerguide/global_operator_runtime.md create mode 100644 content/en/docs/Developerguide/global_os_runtime.md create mode 100644 content/en/docs/Developerguide/global_os_threads.md create mode 100644 content/en/docs/Developerguide/global_pagewriter_status.md create mode 100644 content/en/docs/Developerguide/global_record_reset_time.md create mode 100644 content/en/docs/Developerguide/global_recovery_status.md create mode 100644 content/en/docs/Developerguide/global_redo_status.md create mode 100644 content/en/docs/Developerguide/global_rel_iostat.md create mode 100644 content/en/docs/Developerguide/global_replication_slots.md create mode 100644 content/en/docs/Developerguide/global_replication_stat.md create mode 100644 content/en/docs/Developerguide/global_session_memory.md create mode 100644 content/en/docs/Developerguide/global_session_memory_detail.md create mode 100644 content/en/docs/Developerguide/global_session_stat.md create mode 100644 content/en/docs/Developerguide/global_session_stat_activity.md create mode 100644 content/en/docs/Developerguide/global_session_time.md create mode 100644 content/en/docs/Developerguide/global_shared_memory_detail.md create mode 100644 content/en/docs/Developerguide/global_stat_all_indexes.md create mode 100644 content/en/docs/Developerguide/global_stat_all_tables.md create mode 100644 content/en/docs/Developerguide/global_stat_bad_block.md create mode 100644 content/en/docs/Developerguide/global_stat_database.md create mode 100644 content/en/docs/Developerguide/global_stat_database_conflicts.md create mode 100644 content/en/docs/Developerguide/global_stat_db_cu.md create mode 100644 content/en/docs/Developerguide/global_stat_session_cu.md create mode 100644 content/en/docs/Developerguide/global_stat_sys_indexes.md create mode 100644 content/en/docs/Developerguide/global_stat_sys_tables.md create mode 100644 content/en/docs/Developerguide/global_stat_user_functions.md create mode 100644 content/en/docs/Developerguide/global_stat_user_indexes.md create mode 100644 content/en/docs/Developerguide/global_stat_user_tables.md create mode 100644 content/en/docs/Developerguide/global_stat_xact_all_tables.md create mode 100644 content/en/docs/Developerguide/global_stat_xact_sys_tables.md create mode 100644 content/en/docs/Developerguide/global_stat_xact_user_functions.md create mode 100644 content/en/docs/Developerguide/global_stat_xact_user_tables.md create mode 100644 content/en/docs/Developerguide/global_statement_complex_history.md create mode 100644 content/en/docs/Developerguide/global_statement_complex_history_table.md create mode 100644 content/en/docs/Developerguide/global_statement_complex_runtime.md create mode 100644 content/en/docs/Developerguide/global_statement_count.md create mode 100644 content/en/docs/Developerguide/global_statio_all_indexes.md create mode 100644 content/en/docs/Developerguide/global_statio_all_sequences.md create mode 100644 content/en/docs/Developerguide/global_statio_all_tables.md create mode 100644 content/en/docs/Developerguide/global_statio_sys_indexes.md create mode 100644 content/en/docs/Developerguide/global_statio_sys_sequences.md create mode 100644 content/en/docs/Developerguide/global_statio_sys_tables.md create mode 100644 content/en/docs/Developerguide/global_statio_user_indexes.md create mode 100644 content/en/docs/Developerguide/global_statio_user_sequences.md create mode 100644 content/en/docs/Developerguide/global_statio_user_tables.md create mode 100644 
content/en/docs/Developerguide/global_thread_wait_status.md create mode 100644 content/en/docs/Developerguide/global_threadpool_status.md create mode 100644 content/en/docs/Developerguide/global_transactions_prepared_xacts.md create mode 100644 content/en/docs/Developerguide/global_wait_events.md create mode 100644 content/en/docs/Developerguide/global_workload_transaction.md create mode 100644 content/en/docs/Developerguide/goto-statements.md create mode 100644 content/en/docs/Developerguide/grant.md create mode 100644 content/en/docs/Developerguide/granting-user-permissions.md create mode 100644 content/en/docs/Developerguide/gs_basebackup.md create mode 100644 content/en/docs/Developerguide/gs_ctl-(full-and-incremental).md create mode 100644 content/en/docs/Developerguide/gs_dump.md create mode 100644 content/en/docs/Developerguide/gs_file_stat.md create mode 100644 content/en/docs/Developerguide/gs_instance_time.md create mode 100644 content/en/docs/Developerguide/gs_opt_model.md create mode 100644 content/en/docs/Developerguide/gs_os_run_info.md create mode 100644 content/en/docs/Developerguide/gs_redo_stat.md create mode 100644 content/en/docs/Developerguide/gs_restore.md create mode 100644 content/en/docs/Developerguide/gs_session_cpu_statistics.md create mode 100644 content/en/docs/Developerguide/gs_session_memory.md create mode 100644 content/en/docs/Developerguide/gs_session_memory_detail.md create mode 100644 content/en/docs/Developerguide/gs_session_memory_statistics.md create mode 100644 content/en/docs/Developerguide/gs_session_stat.md create mode 100644 content/en/docs/Developerguide/gs_session_time.md create mode 100644 content/en/docs/Developerguide/gs_sql_count.md create mode 100644 content/en/docs/Developerguide/gs_stat_db_cu.md create mode 100644 content/en/docs/Developerguide/gs_stat_session_cu.md create mode 100644 content/en/docs/Developerguide/gs_thread_memory_detail.md create mode 100644 content/en/docs/Developerguide/gs_total_memory_detail.md create mode 100644 content/en/docs/Developerguide/gs_wlm_instance_history.md create mode 100644 content/en/docs/Developerguide/gs_wlm_operator_history.md create mode 100644 content/en/docs/Developerguide/gs_wlm_operator_info.md create mode 100644 content/en/docs/Developerguide/gs_wlm_operator_statistics.md create mode 100644 content/en/docs/Developerguide/gs_wlm_plan_encoding_table.md create mode 100644 content/en/docs/Developerguide/gs_wlm_plan_operator_history.md create mode 100644 content/en/docs/Developerguide/gs_wlm_plan_operator_info.md create mode 100644 content/en/docs/Developerguide/gs_wlm_rebuild_user_resource_pool.md create mode 100644 content/en/docs/Developerguide/gs_wlm_resource_pool.md create mode 100644 content/en/docs/Developerguide/gs_wlm_session_history.md create mode 100644 content/en/docs/Developerguide/gs_wlm_session_info_all.md create mode 100644 content/en/docs/Developerguide/gs_wlm_session_query_info_all.md create mode 100644 content/en/docs/Developerguide/gs_wlm_session_statistics.md create mode 100644 content/en/docs/Developerguide/gs_wlm_user_info.md create mode 100644 content/en/docs/Developerguide/gs_wlm_user_resource_history.md create mode 100644 content/en/docs/Developerguide/guc-parameter-usage.md create mode 100644 content/en/docs/Developerguide/guc-parameters.md create mode 100644 content/en/docs/Developerguide/ha-replication.md create mode 100644 content/en/docs/Developerguide/highlighting-results.md create mode 100644 content/en/docs/Developerguide/hint-based-tuning.md create mode 100644 
content/en/docs/Developerguide/hint-errors-conflicts-and-other-warnings.md create mode 100644 content/en/docs/Developerguide/i-o.md create mode 100644 content/en/docs/Developerguide/implementation.md create mode 100644 content/en/docs/Developerguide/implicit-cursor.md create mode 100644 content/en/docs/Developerguide/importing-data.md create mode 100644 content/en/docs/Developerguide/indexes.md create mode 100644 content/en/docs/Developerguide/insert.md create mode 100644 content/en/docs/Developerguide/inserting-data-to-tables.md create mode 100644 content/en/docs/Developerguide/instance.md create mode 100644 content/en/docs/Developerguide/instance_time.md create mode 100644 content/en/docs/Developerguide/integration-using-foreign-data-wrappers-(fdw).md create mode 100644 content/en/docs/Developerguide/introducing-mot.md create mode 100644 content/en/docs/Developerguide/introduction-20.md create mode 100644 content/en/docs/Developerguide/introduction-34.md create mode 100644 content/en/docs/Developerguide/introduction-to-the-copymanager-class.md create mode 100644 content/en/docs/Developerguide/introduction-to-the-sql-execution-plan.md create mode 100644 content/en/docs/Developerguide/introduction.md create mode 100644 content/en/docs/Developerguide/isolation-levels.md create mode 100644 content/en/docs/Developerguide/ispell-dictionary.md create mode 100644 content/en/docs/Developerguide/java-sql-callablestatement.md create mode 100644 content/en/docs/Developerguide/java-sql-connection.md create mode 100644 content/en/docs/Developerguide/java-sql-databasemetadata.md create mode 100644 content/en/docs/Developerguide/java-sql-driver.md create mode 100644 content/en/docs/Developerguide/java-sql-preparedstatement.md create mode 100644 content/en/docs/Developerguide/java-sql-resultset.md create mode 100644 content/en/docs/Developerguide/java-sql-resultsetmetadata.md create mode 100644 content/en/docs/Developerguide/java-sql-statement.md create mode 100644 content/en/docs/Developerguide/javax-naming-context.md create mode 100644 content/en/docs/Developerguide/javax-naming-spi-initialcontextfactory.md create mode 100644 content/en/docs/Developerguide/javax-sql-connectionpooldatasource.md create mode 100644 content/en/docs/Developerguide/javax-sql-datasource.md create mode 100644 content/en/docs/Developerguide/javax-sql-pooledconnection.md create mode 100644 content/en/docs/Developerguide/jdbc-interface-reference.md create mode 100644 content/en/docs/Developerguide/jdbc-package-driver-class-and-environment-class.md create mode 100644 content/en/docs/Developerguide/jdbc.md create mode 100644 content/en/docs/Developerguide/jit.md create mode 100644 content/en/docs/Developerguide/join-operation-hints.md create mode 100644 content/en/docs/Developerguide/join-order-hints.md create mode 100644 content/en/docs/Developerguide/json-functions.md create mode 100644 content/en/docs/Developerguide/json-types.md create mode 100644 content/en/docs/Developerguide/kernel-resource-usage.md create mode 100644 content/en/docs/Developerguide/keywords.md create mode 100644 content/en/docs/Developerguide/libpq.md create mode 100644 content/en/docs/Developerguide/limitations.md create mode 100644 content/en/docs/Developerguide/llvm-application-scenarios-and-restrictions.md create mode 100644 content/en/docs/Developerguide/load-management.md create mode 100644 content/en/docs/Developerguide/loading-the-driver.md create mode 100644 content/en/docs/Developerguide/local_rel_iostat.md create mode 100644 
content/en/docs/Developerguide/local_threadpool_status.md create mode 100644 content/en/docs/Developerguide/lock-25.md create mode 100644 content/en/docs/Developerguide/lock-39.md create mode 100644 content/en/docs/Developerguide/lock-management.md create mode 100644 content/en/docs/Developerguide/lock-operations.md create mode 100644 content/en/docs/Developerguide/lock.md create mode 100644 content/en/docs/Developerguide/locks.md create mode 100644 content/en/docs/Developerguide/log-replay.md create mode 100644 content/en/docs/Developerguide/logging-21.md create mode 100644 content/en/docs/Developerguide/logging-content.md create mode 100644 content/en/docs/Developerguide/logging-destination.md create mode 100644 content/en/docs/Developerguide/logging-time.md create mode 100644 content/en/docs/Developerguide/logging.md create mode 100644 content/en/docs/Developerguide/logical-operators.md create mode 100644 content/en/docs/Developerguide/logical-replication-functions.md create mode 100644 content/en/docs/Developerguide/loop-statements.md create mode 100644 content/en/docs/Developerguide/maintaining-audit-logs.md create mode 100644 content/en/docs/Developerguide/managing-concurrent-write-operations.md create mode 100644 content/en/docs/Developerguide/managing-ssl-certificates.md create mode 100644 content/en/docs/Developerguide/managing-transactions.md create mode 100644 content/en/docs/Developerguide/managing-users-and-their-permissions.md create mode 100644 content/en/docs/Developerguide/manipulating-queries.md create mode 100644 content/en/docs/Developerguide/manipulating-tsvector.md create mode 100644 content/en/docs/Developerguide/mathematical-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/memory-24.md create mode 100644 content/en/docs/Developerguide/memory-25.md create mode 100644 content/en/docs/Developerguide/memory-26.md create mode 100644 content/en/docs/Developerguide/memory-38.md create mode 100644 content/en/docs/Developerguide/memory-40.md create mode 100644 content/en/docs/Developerguide/memory-and-storage-planning.md create mode 100644 content/en/docs/Developerguide/memory-management.md create mode 100644 content/en/docs/Developerguide/memory-optimized-storage-engine.md create mode 100644 content/en/docs/Developerguide/memory-planning.md create mode 100644 content/en/docs/Developerguide/memory.md create mode 100644 content/en/docs/Developerguide/memory_node_detail.md create mode 100644 content/en/docs/Developerguide/memsql.md create mode 100644 content/en/docs/Developerguide/merge-into.md create mode 100644 content/en/docs/Developerguide/microsoft-hekaton.md create mode 100644 content/en/docs/Developerguide/miscellaneous-parameters.md create mode 100644 content/en/docs/Developerguide/mode-matching-operators.md create mode 100644 content/en/docs/Developerguide/monetary.md create mode 100644 content/en/docs/Developerguide/monitoring.md create mode 100644 content/en/docs/Developerguide/mot-concepts.md create mode 100644 content/en/docs/Developerguide/mot-configuration-settings.md create mode 100644 content/en/docs/Developerguide/mot-error-messages.md create mode 100644 content/en/docs/Developerguide/mot-introduction.md create mode 100644 content/en/docs/Developerguide/mot-key-technologies.md create mode 100644 content/en/docs/Developerguide/mot-statistics.md create mode 100644 content/en/docs/Developerguide/mot-table-limitations.md create mode 100644 content/en/docs/Developerguide/mot.md create mode 100644 content/en/docs/Developerguide/move.md 
create mode 100644 content/en/docs/Developerguide/mpp_tables.md create mode 100644 content/en/docs/Developerguide/network-17.md create mode 100644 content/en/docs/Developerguide/network-26.md create mode 100644 content/en/docs/Developerguide/network-address-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/network-address.md create mode 100644 content/en/docs/Developerguide/network.md create mode 100644 content/en/docs/Developerguide/non-unique-indexes.md create mode 100644 content/en/docs/Developerguide/null-statements.md create mode 100644 content/en/docs/Developerguide/numa-awareness-allocation-and-affinity.md create mode 100644 content/en/docs/Developerguide/numeric-data-types.md create mode 100644 content/en/docs/Developerguide/nvme-disk.md create mode 100644 content/en/docs/Developerguide/object-identifier-types.md create mode 100644 content/en/docs/Developerguide/object.md create mode 100644 content/en/docs/Developerguide/obtaining-help-information-12.md create mode 100644 content/en/docs/Developerguide/obtaining-help-information.md create mode 100644 content/en/docs/Developerguide/occ-vs-2pl-differences-by-example.md create mode 100644 content/en/docs/Developerguide/odbc-interface-reference.md create mode 100644 content/en/docs/Developerguide/odbc-packages-dependent-libraries-and-header-files.md create mode 100644 content/en/docs/Developerguide/odbc.md create mode 100644 content/en/docs/Developerguide/opengauss-sql.md create mode 100644 content/en/docs/Developerguide/opengauss-transaction.md create mode 100644 content/en/docs/Developerguide/operation-auditing.md create mode 100644 content/en/docs/Developerguide/operator.md create mode 100644 content/en/docs/Developerguide/operator_history.md create mode 100644 content/en/docs/Developerguide/operator_history_table.md create mode 100644 content/en/docs/Developerguide/operator_runtime.md create mode 100644 content/en/docs/Developerguide/operators.md create mode 100644 content/en/docs/Developerguide/optimistic-concurrency-control.md create mode 100644 content/en/docs/Developerguide/optimizer-cost-constants.md create mode 100644 content/en/docs/Developerguide/optimizer-method-configuration.md create mode 100644 content/en/docs/Developerguide/optimizing-concurrent-queue-parameters.md create mode 100644 content/en/docs/Developerguide/optimizing-database-memory-parameters.md create mode 100644 content/en/docs/Developerguide/optimizing-database-parameters.md create mode 100644 content/en/docs/Developerguide/optimizing-operators.md create mode 100644 content/en/docs/Developerguide/optimizing-os-parameters.md create mode 100644 content/en/docs/Developerguide/optimizing-sql-self-diagnosis.md create mode 100644 content/en/docs/Developerguide/optimizing-statistics.md create mode 100644 content/en/docs/Developerguide/optimizing-subqueries.md create mode 100644 content/en/docs/Developerguide/oracle-in-memory-option-and-oracle-timesten.md create mode 100644 content/en/docs/Developerguide/original-information-table-of-wdr-snapshots.md create mode 100644 content/en/docs/Developerguide/os-environment-settings.md create mode 100644 content/en/docs/Developerguide/os-kernel-and-boot.md create mode 100644 content/en/docs/Developerguide/os.md create mode 100644 content/en/docs/Developerguide/os_runtime.md create mode 100644 content/en/docs/Developerguide/os_threads.md create mode 100644 content/en/docs/Developerguide/other-default-parameters.md create mode 100644 
content/en/docs/Developerguide/other-factors-affecting-llvm-performance.md create mode 100644 content/en/docs/Developerguide/other-functions.md create mode 100644 content/en/docs/Developerguide/other-operations.md create mode 100644 content/en/docs/Developerguide/other-optimizer-options.md create mode 100644 content/en/docs/Developerguide/other-statements.md create mode 100644 content/en/docs/Developerguide/other-system-functions.md create mode 100644 content/en/docs/Developerguide/overview-10.md create mode 100644 content/en/docs/Developerguide/overview-15.md create mode 100644 content/en/docs/Developerguide/overview-16.md create mode 100644 content/en/docs/Developerguide/overview-17.md create mode 100644 content/en/docs/Developerguide/overview-18.md create mode 100644 content/en/docs/Developerguide/overview-19.md create mode 100644 content/en/docs/Developerguide/overview-2.md create mode 100644 content/en/docs/Developerguide/overview-23.md create mode 100644 content/en/docs/Developerguide/overview-24.md create mode 100644 content/en/docs/Developerguide/overview-27.md create mode 100644 content/en/docs/Developerguide/overview-28.md create mode 100644 content/en/docs/Developerguide/overview-29.md create mode 100644 content/en/docs/Developerguide/overview-31.md create mode 100644 content/en/docs/Developerguide/overview-37.md create mode 100644 content/en/docs/Developerguide/overview-5.md create mode 100644 content/en/docs/Developerguide/overview-7.md create mode 100644 content/en/docs/Developerguide/overview-8.md create mode 100644 content/en/docs/Developerguide/overview-of-system-catalogs-and-system-views.md create mode 100644 content/en/docs/Developerguide/overview.md create mode 100644 content/en/docs/Developerguide/parallel-data-import.md create mode 100644 content/en/docs/Developerguide/parser.md create mode 100644 content/en/docs/Developerguide/parsing-documents.md create mode 100644 content/en/docs/Developerguide/parsing-queries.md create mode 100644 content/en/docs/Developerguide/performance-benchmarks.md create mode 100644 content/en/docs/Developerguide/performance-metric-tpm-c.md create mode 100644 content/en/docs/Developerguide/performance-statistics.md create mode 100644 content/en/docs/Developerguide/performance-tuning.md create mode 100644 content/en/docs/Developerguide/performing-a-deep-copy-by-creating-a-temporary-table-and-truncating-the-original-table.md create mode 100644 content/en/docs/Developerguide/performing-a-deep-copy-by-using-the-create-table-like-statement.md create mode 100644 content/en/docs/Developerguide/performing-a-deep-copy-by-using-the-create-table-statement.md create mode 100644 content/en/docs/Developerguide/pg_aggregate.md create mode 100644 content/en/docs/Developerguide/pg_am.md create mode 100644 content/en/docs/Developerguide/pg_amop.md create mode 100644 content/en/docs/Developerguide/pg_amproc.md create mode 100644 content/en/docs/Developerguide/pg_app_workloadgroup_mapping.md create mode 100644 content/en/docs/Developerguide/pg_attrdef.md create mode 100644 content/en/docs/Developerguide/pg_attribute.md create mode 100644 content/en/docs/Developerguide/pg_auth_history.md create mode 100644 content/en/docs/Developerguide/pg_auth_members.md create mode 100644 content/en/docs/Developerguide/pg_authid.md create mode 100644 content/en/docs/Developerguide/pg_available_extension_versions.md create mode 100644 content/en/docs/Developerguide/pg_available_extensions.md create mode 100644 content/en/docs/Developerguide/pg_cast.md create mode 100644 
content/en/docs/Developerguide/pg_class.md create mode 100644 content/en/docs/Developerguide/pg_collation.md create mode 100644 content/en/docs/Developerguide/pg_constraint.md create mode 100644 content/en/docs/Developerguide/pg_conversion.md create mode 100644 content/en/docs/Developerguide/pg_cursors.md create mode 100644 content/en/docs/Developerguide/pg_database.md create mode 100644 content/en/docs/Developerguide/pg_db_role_setting.md create mode 100644 content/en/docs/Developerguide/pg_default_acl.md create mode 100644 content/en/docs/Developerguide/pg_depend.md create mode 100644 content/en/docs/Developerguide/pg_description.md create mode 100644 content/en/docs/Developerguide/pg_directory.md create mode 100644 content/en/docs/Developerguide/pg_enum.md create mode 100644 content/en/docs/Developerguide/pg_ext_stats.md create mode 100644 content/en/docs/Developerguide/pg_extension.md create mode 100644 content/en/docs/Developerguide/pg_extension_data_source.md create mode 100644 content/en/docs/Developerguide/pg_foreign_data_wrapper.md create mode 100644 content/en/docs/Developerguide/pg_foreign_server.md create mode 100644 content/en/docs/Developerguide/pg_foreign_table.md create mode 100644 content/en/docs/Developerguide/pg_get_invalid_backends.md create mode 100644 content/en/docs/Developerguide/pg_get_senders_catchup_time.md create mode 100644 content/en/docs/Developerguide/pg_group.md create mode 100644 content/en/docs/Developerguide/pg_index.md create mode 100644 content/en/docs/Developerguide/pg_indexes.md create mode 100644 content/en/docs/Developerguide/pg_inherits.md create mode 100644 content/en/docs/Developerguide/pg_job.md create mode 100644 content/en/docs/Developerguide/pg_job_proc.md create mode 100644 content/en/docs/Developerguide/pg_language.md create mode 100644 content/en/docs/Developerguide/pg_largeobject.md create mode 100644 content/en/docs/Developerguide/pg_largeobject_metadata.md create mode 100644 content/en/docs/Developerguide/pg_locks.md create mode 100644 content/en/docs/Developerguide/pg_namespace.md create mode 100644 content/en/docs/Developerguide/pg_node_env.md create mode 100644 content/en/docs/Developerguide/pg_object.md create mode 100644 content/en/docs/Developerguide/pg_opclass.md create mode 100644 content/en/docs/Developerguide/pg_operator.md create mode 100644 content/en/docs/Developerguide/pg_opfamily.md create mode 100644 content/en/docs/Developerguide/pg_os_threads.md create mode 100644 content/en/docs/Developerguide/pg_partition.md create mode 100644 content/en/docs/Developerguide/pg_pltemplate.md create mode 100644 content/en/docs/Developerguide/pg_prepared_statements.md create mode 100644 content/en/docs/Developerguide/pg_prepared_xacts.md create mode 100644 content/en/docs/Developerguide/pg_proc.md create mode 100644 content/en/docs/Developerguide/pg_range.md create mode 100644 content/en/docs/Developerguide/pg_replication_slots.md create mode 100644 content/en/docs/Developerguide/pg_resource_pool.md create mode 100644 content/en/docs/Developerguide/pg_rewrite.md create mode 100644 content/en/docs/Developerguide/pg_rlspolicies.md create mode 100644 content/en/docs/Developerguide/pg_rlspolicy.md create mode 100644 content/en/docs/Developerguide/pg_roles.md create mode 100644 content/en/docs/Developerguide/pg_rules.md create mode 100644 content/en/docs/Developerguide/pg_seclabel.md create mode 100644 content/en/docs/Developerguide/pg_seclabels.md create mode 100644 content/en/docs/Developerguide/pg_session_iostat.md create mode 100644 
content/en/docs/Developerguide/pg_session_wlmstat.md create mode 100644 content/en/docs/Developerguide/pg_settings.md create mode 100644 content/en/docs/Developerguide/pg_shadow.md create mode 100644 content/en/docs/Developerguide/pg_shdepend.md create mode 100644 content/en/docs/Developerguide/pg_shdescription.md create mode 100644 content/en/docs/Developerguide/pg_shseclabel.md create mode 100644 content/en/docs/Developerguide/pg_stat_activity.md create mode 100644 content/en/docs/Developerguide/pg_stat_all_indexes.md create mode 100644 content/en/docs/Developerguide/pg_stat_all_tables.md create mode 100644 content/en/docs/Developerguide/pg_stat_bad_block.md create mode 100644 content/en/docs/Developerguide/pg_stat_bgwriter.md create mode 100644 content/en/docs/Developerguide/pg_stat_database.md create mode 100644 content/en/docs/Developerguide/pg_stat_database_conflicts.md create mode 100644 content/en/docs/Developerguide/pg_stat_replication.md create mode 100644 content/en/docs/Developerguide/pg_stat_sys_indexes.md create mode 100644 content/en/docs/Developerguide/pg_stat_sys_tables.md create mode 100644 content/en/docs/Developerguide/pg_stat_user_functions.md create mode 100644 content/en/docs/Developerguide/pg_stat_user_indexes.md create mode 100644 content/en/docs/Developerguide/pg_stat_user_tables.md create mode 100644 content/en/docs/Developerguide/pg_stat_xact_all_tables.md create mode 100644 content/en/docs/Developerguide/pg_stat_xact_sys_tables.md create mode 100644 content/en/docs/Developerguide/pg_stat_xact_user_functions.md create mode 100644 content/en/docs/Developerguide/pg_stat_xact_user_tables.md create mode 100644 content/en/docs/Developerguide/pg_statio_all_indexes.md create mode 100644 content/en/docs/Developerguide/pg_statio_all_sequences.md create mode 100644 content/en/docs/Developerguide/pg_statio_all_tables.md create mode 100644 content/en/docs/Developerguide/pg_statio_sys_indexes.md create mode 100644 content/en/docs/Developerguide/pg_statio_sys_sequences.md create mode 100644 content/en/docs/Developerguide/pg_statio_sys_tables.md create mode 100644 content/en/docs/Developerguide/pg_statio_user_indexes.md create mode 100644 content/en/docs/Developerguide/pg_statio_user_sequences.md create mode 100644 content/en/docs/Developerguide/pg_statio_user_tables.md create mode 100644 content/en/docs/Developerguide/pg_statistic.md create mode 100644 content/en/docs/Developerguide/pg_statistic_ext.md create mode 100644 content/en/docs/Developerguide/pg_stats.md create mode 100644 content/en/docs/Developerguide/pg_tables.md create mode 100644 content/en/docs/Developerguide/pg_tablespace.md create mode 100644 content/en/docs/Developerguide/pg_tde_info.md create mode 100644 content/en/docs/Developerguide/pg_thread_wait_status.md create mode 100644 content/en/docs/Developerguide/pg_timezone_abbrevs.md create mode 100644 content/en/docs/Developerguide/pg_timezone_names.md create mode 100644 content/en/docs/Developerguide/pg_total_user_resource_info.md create mode 100644 content/en/docs/Developerguide/pg_total_user_resource_info_oid.md create mode 100644 content/en/docs/Developerguide/pg_trigger.md create mode 100644 content/en/docs/Developerguide/pg_ts_config.md create mode 100644 content/en/docs/Developerguide/pg_ts_config_map.md create mode 100644 content/en/docs/Developerguide/pg_ts_dict.md create mode 100644 content/en/docs/Developerguide/pg_ts_parser.md create mode 100644 content/en/docs/Developerguide/pg_ts_template.md create mode 100644 
content/en/docs/Developerguide/pg_type.md create mode 100644 content/en/docs/Developerguide/pg_user.md create mode 100644 content/en/docs/Developerguide/pg_user_mapping.md create mode 100644 content/en/docs/Developerguide/pg_user_mappings.md create mode 100644 content/en/docs/Developerguide/pg_user_status.md create mode 100644 content/en/docs/Developerguide/pg_variable_info.md create mode 100644 content/en/docs/Developerguide/pg_views.md create mode 100644 content/en/docs/Developerguide/pg_wlm_statistics.md create mode 100644 content/en/docs/Developerguide/pg_workload_group.md create mode 100644 content/en/docs/Developerguide/pl-pgsql-functions.md create mode 100644 content/en/docs/Developerguide/plan-hint-optimization.md create mode 100644 content/en/docs/Developerguide/plan_table.md create mode 100644 content/en/docs/Developerguide/plan_table_data.md create mode 100644 content/en/docs/Developerguide/planning-a-storage-model.md create mode 100644 content/en/docs/Developerguide/platform-and-client-compatibility.md create mode 100644 content/en/docs/Developerguide/potential-deadlocks-during-concurrent-write.md create mode 100644 content/en/docs/Developerguide/pqcancel.md create mode 100644 content/en/docs/Developerguide/pqclear.md create mode 100644 content/en/docs/Developerguide/pqconnectdb.md create mode 100644 content/en/docs/Developerguide/pqconnectdbparams.md create mode 100644 content/en/docs/Developerguide/pqconnectstart.md create mode 100644 content/en/docs/Developerguide/pqconninfoparse.md create mode 100644 content/en/docs/Developerguide/pqerrormessage.md create mode 100644 content/en/docs/Developerguide/pqexec.md create mode 100644 content/en/docs/Developerguide/pqexecparams.md create mode 100644 content/en/docs/Developerguide/pqexecparamsbatch.md create mode 100644 content/en/docs/Developerguide/pqexecprepared.md create mode 100644 content/en/docs/Developerguide/pqexecpreparedbatch.md create mode 100644 content/en/docs/Developerguide/pqfinish.md create mode 100644 content/en/docs/Developerguide/pqflush.md create mode 100644 content/en/docs/Developerguide/pqfname.md create mode 100644 content/en/docs/Developerguide/pqfreecancel.md create mode 100644 content/en/docs/Developerguide/pqgetcancel.md create mode 100644 content/en/docs/Developerguide/pqgetvalue.md create mode 100644 content/en/docs/Developerguide/pqnfields.md create mode 100644 content/en/docs/Developerguide/pqntuples.md create mode 100644 content/en/docs/Developerguide/pqprepare.md create mode 100644 content/en/docs/Developerguide/pqreset.md create mode 100644 content/en/docs/Developerguide/pqresultstatus.md create mode 100644 content/en/docs/Developerguide/pqsendprepare.md create mode 100644 content/en/docs/Developerguide/pqsendquery.md create mode 100644 content/en/docs/Developerguide/pqsendqueryparams.md create mode 100644 content/en/docs/Developerguide/pqsendqueryprepared.md create mode 100644 content/en/docs/Developerguide/pqsetdblogin.md create mode 100644 content/en/docs/Developerguide/pqstatus.md create mode 100644 content/en/docs/Developerguide/predictor.md create mode 100644 content/en/docs/Developerguide/preparation.md create mode 100644 content/en/docs/Developerguide/prepare-transaction.md create mode 100644 content/en/docs/Developerguide/prepare.md create mode 100644 content/en/docs/Developerguide/prerequisite-check.md create mode 100644 content/en/docs/Developerguide/prerequisites.md create mode 100644 content/en/docs/Developerguide/primary-server.md create mode 100644 
content/en/docs/Developerguide/processing-data-in-a-result-set.md create mode 100644 content/en/docs/Developerguide/pseudo-types.md create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Developerguide/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Developerguide/query-28.md create mode 100644 content/en/docs/Developerguide/query-43.md create mode 100644 content/en/docs/Developerguide/query-and-index-statistics-collector.md create mode 100644 content/en/docs/Developerguide/query-execution-process.md create mode 100644 content/en/docs/Developerguide/query-native-compilation-(jit).md create mode 100644 content/en/docs/Developerguide/query-native-compilation-orange.md create mode 100644 content/en/docs/Developerguide/query-planning.md create mode 100644 content/en/docs/Developerguide/query-request-handling-process.md create mode 100644 content/en/docs/Developerguide/query.md create mode 100644 content/en/docs/Developerguide/querying-audit-results.md create mode 100644 content/en/docs/Developerguide/querying-sql-statements-that-affect-performance-most.md create mode 100644 content/en/docs/Developerguide/querying-system-catalogs.md create mode 100644 content/en/docs/Developerguide/range-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/ranking-search-results.md create mode 100644 content/en/docs/Developerguide/reassign-owned.md create mode 100644 content/en/docs/Developerguide/recommended-suggestions-for-llvm.md create mode 100644 content/en/docs/Developerguide/record.md create mode 100644 content/en/docs/Developerguide/recovery-18.md create mode 100644 content/en/docs/Developerguide/recovery-23.md create mode 100644 content/en/docs/Developerguide/recovery.md create mode 100644 content/en/docs/Developerguide/redo-log.md create mode 100644 content/en/docs/Developerguide/reindex.md create mode 100644 content/en/docs/Developerguide/release-savepoint.md create mode 100644 content/en/docs/Developerguide/replacing-certificates.md create mode 100644 content/en/docs/Developerguide/replication-and-high-availability.md create mode 100644 content/en/docs/Developerguide/replication_slots.md create mode 100644 content/en/docs/Developerguide/replication_stat.md create mode 100644 content/en/docs/Developerguide/reset.md create mode 100644 content/en/docs/Developerguide/resetting-key-parameters-during-sql-tuning.md create mode 100644 content/en/docs/Developerguide/resetting-parameters.md create mode 100644 content/en/docs/Developerguide/resource-consumption.md create mode 100644 content/en/docs/Developerguide/result-linear-scale-up-many-core.md create mode 100644 content/en/docs/Developerguide/results-report.md create mode 100644 content/en/docs/Developerguide/retry-management.md create mode 100644 content/en/docs/Developerguide/retrying-a-aborted-transaction-orange.md create mode 100644 content/en/docs/Developerguide/return-next-and-return-query.md create mode 100644 content/en/docs/Developerguide/return-statements.md create mode 100644 content/en/docs/Developerguide/return.md create mode 100644 
content/en/docs/Developerguide/reviewing-and-modifying-a-table-definition.md create mode 100644 content/en/docs/Developerguide/revoke.md create mode 100644 content/en/docs/Developerguide/rewriting-queries.md create mode 100644 content/en/docs/Developerguide/roles.md create mode 100644 content/en/docs/Developerguide/rollback-prepared.md create mode 100644 content/en/docs/Developerguide/rollback-to-savepoint.md create mode 100644 content/en/docs/Developerguide/rollback.md create mode 100644 content/en/docs/Developerguide/row-expressions.md create mode 100644 content/en/docs/Developerguide/row-level-access-control.md create mode 100644 content/en/docs/Developerguide/rows-hints.md create mode 100644 content/en/docs/Developerguide/running-sql-statements.md create mode 100644 content/en/docs/Developerguide/running-the-copy-from-stdin-statement-to-import-data.md create mode 100644 content/en/docs/Developerguide/running-the-insert-statement-to-insert-data.md create mode 100644 content/en/docs/Developerguide/sample-workloads.md create mode 100644 content/en/docs/Developerguide/savepoint.md create mode 100644 content/en/docs/Developerguide/scalability.md create mode 100644 content/en/docs/Developerguide/scale-out-distributed-database.md create mode 100644 content/en/docs/Developerguide/scale-out.md create mode 100644 content/en/docs/Developerguide/scale-up-architecture.md create mode 100644 content/en/docs/Developerguide/scan-operation-hints.md create mode 100644 content/en/docs/Developerguide/schemas.md create mode 100644 content/en/docs/Developerguide/seamless-integration-of-mot-with-gaussdb.md create mode 100644 content/en/docs/Developerguide/searching-a-table.md create mode 100644 content/en/docs/Developerguide/secondary-index-support.md create mode 100644 content/en/docs/Developerguide/security-and-authentication-(postgresql-conf).md create mode 100644 content/en/docs/Developerguide/security-functions.md create mode 100644 content/en/docs/Developerguide/select-into.md create mode 100644 content/en/docs/Developerguide/select.md create mode 100644 content/en/docs/Developerguide/selecting-a-data-type.md create mode 100644 content/en/docs/Developerguide/selecting-a-storage-model.md create mode 100644 content/en/docs/Developerguide/sending-server.md create mode 100644 content/en/docs/Developerguide/separation-of-duties.md create mode 100644 content/en/docs/Developerguide/sequence-functions.md create mode 100644 content/en/docs/Developerguide/server-optimization-arm-huawei-taishan-4p.md create mode 100644 content/en/docs/Developerguide/server-optimization-x86.md create mode 100644 content/en/docs/Developerguide/server-signal-functions.md create mode 100644 content/en/docs/Developerguide/session-thread.md create mode 100644 content/en/docs/Developerguide/session_cpu_runtime.md create mode 100644 content/en/docs/Developerguide/session_memory.md create mode 100644 content/en/docs/Developerguide/session_memory_detail.md create mode 100644 content/en/docs/Developerguide/session_memory_runtime.md create mode 100644 content/en/docs/Developerguide/session_stat.md create mode 100644 content/en/docs/Developerguide/session_stat_activity.md create mode 100644 content/en/docs/Developerguide/session_time.md create mode 100644 content/en/docs/Developerguide/set-constraints.md create mode 100644 content/en/docs/Developerguide/set-returning-functions.md create mode 100644 content/en/docs/Developerguide/set-role.md create mode 100644 content/en/docs/Developerguide/set-session-authorization.md create mode 100644 
content/en/docs/Developerguide/set-transaction.md create mode 100644 content/en/docs/Developerguide/set.md create mode 100644 content/en/docs/Developerguide/setting-account-security-policies.md create mode 100644 content/en/docs/Developerguide/setting-password-security-policies.md create mode 100644 content/en/docs/Developerguide/setting-security-policies.md create mode 100644 content/en/docs/Developerguide/setting-the-validity-period-of-an-account.md create mode 100644 content/en/docs/Developerguide/setting-up-and-running-benchmarksql.md create mode 100644 content/en/docs/Developerguide/setting-user-permissions.md create mode 100644 content/en/docs/Developerguide/settings.md create mode 100644 content/en/docs/Developerguide/shared_memory_detail.md create mode 100644 content/en/docs/Developerguide/show.md create mode 100644 content/en/docs/Developerguide/simple-dictionary.md create mode 100644 content/en/docs/Developerguide/simple-expressions.md create mode 100644 content/en/docs/Developerguide/snapshot-snapshot.md create mode 100644 content/en/docs/Developerguide/snapshot-synchronization-functions.md create mode 100644 content/en/docs/Developerguide/snapshot-tables_snap_timestamp.md create mode 100644 content/en/docs/Developerguide/snowball-dictionary.md create mode 100644 content/en/docs/Developerguide/sql-coverage-and-limitations.md create mode 100644 content/en/docs/Developerguide/sql-optimization.md create mode 100644 content/en/docs/Developerguide/sql-reference.md create mode 100644 content/en/docs/Developerguide/sql-syntax.md create mode 100644 content/en/docs/Developerguide/sqlallocconnect.md create mode 100644 content/en/docs/Developerguide/sqlallocenv.md create mode 100644 content/en/docs/Developerguide/sqlallochandle.md create mode 100644 content/en/docs/Developerguide/sqlallocstmt.md create mode 100644 content/en/docs/Developerguide/sqlbindcol.md create mode 100644 content/en/docs/Developerguide/sqlbindparameter.md create mode 100644 content/en/docs/Developerguide/sqlcolattribute.md create mode 100644 content/en/docs/Developerguide/sqlconnect.md create mode 100644 content/en/docs/Developerguide/sqldiag.md create mode 100644 content/en/docs/Developerguide/sqldisconnect.md create mode 100644 content/en/docs/Developerguide/sqlexecdirect.md create mode 100644 content/en/docs/Developerguide/sqlexecute.md create mode 100644 content/en/docs/Developerguide/sqlfetch.md create mode 100644 content/en/docs/Developerguide/sqlfreeconnect.md create mode 100644 content/en/docs/Developerguide/sqlfreeenv.md create mode 100644 content/en/docs/Developerguide/sqlfreehandle.md create mode 100644 content/en/docs/Developerguide/sqlfreestmt.md create mode 100644 content/en/docs/Developerguide/sqlgetdata.md create mode 100644 content/en/docs/Developerguide/sqlgetdiagrec.md create mode 100644 content/en/docs/Developerguide/sqlprepare.md create mode 100644 content/en/docs/Developerguide/sqlsetconnectattr.md create mode 100644 content/en/docs/Developerguide/sqlsetenvattr.md create mode 100644 content/en/docs/Developerguide/sqlsetstmtattr.md create mode 100644 content/en/docs/Developerguide/standby-node-in-the-need-repair-(wal)-state.md create mode 100644 content/en/docs/Developerguide/standby-server.md create mode 100644 content/en/docs/Developerguide/start-transaction.md create mode 100644 content/en/docs/Developerguide/stat_all_indexes.md create mode 100644 content/en/docs/Developerguide/stat_all_tables.md create mode 100644 content/en/docs/Developerguide/stat_bad_block.md create mode 100644 
content/en/docs/Developerguide/stat_database.md create mode 100644 content/en/docs/Developerguide/stat_database_conflicts.md create mode 100644 content/en/docs/Developerguide/stat_sys_indexes.md create mode 100644 content/en/docs/Developerguide/stat_sys_tables.md create mode 100644 content/en/docs/Developerguide/stat_user_functions.md create mode 100644 content/en/docs/Developerguide/stat_user_indexes.md create mode 100644 content/en/docs/Developerguide/stat_user_tables.md create mode 100644 content/en/docs/Developerguide/stat_xact_all_tables.md create mode 100644 content/en/docs/Developerguide/stat_xact_sys_tables.md create mode 100644 content/en/docs/Developerguide/stat_xact_user_functions.md create mode 100644 content/en/docs/Developerguide/stat_xact_user_tables.md create mode 100644 content/en/docs/Developerguide/statement-behavior.md create mode 100644 content/en/docs/Developerguide/statement.md create mode 100644 content/en/docs/Developerguide/statement_complex_history.md create mode 100644 content/en/docs/Developerguide/statement_complex_history_table.md create mode 100644 content/en/docs/Developerguide/statement_complex_runtime.md create mode 100644 content/en/docs/Developerguide/statement_count.md create mode 100644 content/en/docs/Developerguide/statement_iostat_complex_runtime.md create mode 100644 content/en/docs/Developerguide/statement_responsetime_percentile.md create mode 100644 content/en/docs/Developerguide/statement_user_complex_history.md create mode 100644 content/en/docs/Developerguide/statement_wlmstat_complex_runtime.md create mode 100644 content/en/docs/Developerguide/statio_all_indexes.md create mode 100644 content/en/docs/Developerguide/statio_all_sequences.md create mode 100644 content/en/docs/Developerguide/statio_all_tables.md create mode 100644 content/en/docs/Developerguide/statio_sys_indexes.md create mode 100644 content/en/docs/Developerguide/statio_sys_sequences.md create mode 100644 content/en/docs/Developerguide/statio_sys_tables.md create mode 100644 content/en/docs/Developerguide/statio_user_indexes.md create mode 100644 content/en/docs/Developerguide/statio_user_sequences.md create mode 100644 content/en/docs/Developerguide/statio_user_tables.md create mode 100644 content/en/docs/Developerguide/statistics-during-the-database-running.md create mode 100644 content/en/docs/Developerguide/statistics-information-functions.md create mode 100644 content/en/docs/Developerguide/statistics.md create mode 100644 content/en/docs/Developerguide/stop-words.md create mode 100644 content/en/docs/Developerguide/storage-io.md create mode 100644 content/en/docs/Developerguide/storage.md create mode 100644 content/en/docs/Developerguide/stored-procedure-21.md create mode 100644 content/en/docs/Developerguide/stored-procedure-35.md create mode 100644 content/en/docs/Developerguide/stored-procedure.md create mode 100644 content/en/docs/Developerguide/sublink-name-hints.md create mode 100644 content/en/docs/Developerguide/subprogram.md create mode 100644 content/en/docs/Developerguide/subquery-expressions.md create mode 100644 content/en/docs/Developerguide/summary_file_iostat.md create mode 100644 content/en/docs/Developerguide/summary_file_redo_iostat.md create mode 100644 content/en/docs/Developerguide/summary_rel_iostat.md create mode 100644 content/en/docs/Developerguide/summary_stat_all_indexes.md create mode 100644 content/en/docs/Developerguide/summary_stat_all_tables.md create mode 100644 content/en/docs/Developerguide/summary_stat_bad_block.md create mode 100644 
content/en/docs/Developerguide/summary_stat_database.md create mode 100644 content/en/docs/Developerguide/summary_stat_database_conflicts.md create mode 100644 content/en/docs/Developerguide/summary_stat_sys_indexes.md create mode 100644 content/en/docs/Developerguide/summary_stat_sys_tables.md create mode 100644 content/en/docs/Developerguide/summary_stat_user_functions.md create mode 100644 content/en/docs/Developerguide/summary_stat_user_indexes.md create mode 100644 content/en/docs/Developerguide/summary_stat_user_tables.md create mode 100644 content/en/docs/Developerguide/summary_stat_xact_all_tables.md create mode 100644 content/en/docs/Developerguide/summary_stat_xact_sys_tables.md create mode 100644 content/en/docs/Developerguide/summary_stat_xact_user_functions.md create mode 100644 content/en/docs/Developerguide/summary_stat_xact_user_tables.md create mode 100644 content/en/docs/Developerguide/summary_statement.md create mode 100644 content/en/docs/Developerguide/summary_statement_count.md create mode 100644 content/en/docs/Developerguide/summary_statio_all_indexes.md create mode 100644 content/en/docs/Developerguide/summary_statio_all_sequences.md create mode 100644 content/en/docs/Developerguide/summary_statio_all_tables.md create mode 100644 content/en/docs/Developerguide/summary_statio_sys_indexes.md create mode 100644 content/en/docs/Developerguide/summary_statio_sys_sequences.md create mode 100644 content/en/docs/Developerguide/summary_statio_sys_tables.md create mode 100644 content/en/docs/Developerguide/summary_statio_user_indexes.md create mode 100644 content/en/docs/Developerguide/summary_statio_user_sequences.md create mode 100644 content/en/docs/Developerguide/summary_statio_user_tables.md create mode 100644 content/en/docs/Developerguide/summary_transactions_prepared_xacts.md create mode 100644 content/en/docs/Developerguide/summary_user_login.md create mode 100644 content/en/docs/Developerguide/summary_workload_sql_count.md create mode 100644 content/en/docs/Developerguide/summary_workload_sql_elapse_time.md create mode 100644 content/en/docs/Developerguide/summary_workload_transaction.md create mode 100644 content/en/docs/Developerguide/synonym-dictionary.md create mode 100644 content/en/docs/Developerguide/system-administration-functions.md create mode 100644 content/en/docs/Developerguide/system-catalogs-and-system-views.md create mode 100644 content/en/docs/Developerguide/system-catalogs.md create mode 100644 content/en/docs/Developerguide/system-information-functions.md create mode 100644 content/en/docs/Developerguide/system-level-optimization.md create mode 100644 content/en/docs/Developerguide/system-operation.md create mode 100644 content/en/docs/Developerguide/system-optimization.md create mode 100644 content/en/docs/Developerguide/system-performance-snapshot.md create mode 100644 content/en/docs/Developerguide/system-views.md create mode 100644 content/en/docs/Developerguide/tables-and-indexes.md create mode 100644 content/en/docs/Developerguide/technical-requirements.md create mode 100644 content/en/docs/Developerguide/testing-a-configuration.md create mode 100644 content/en/docs/Developerguide/testing-a-dictionary.md create mode 100644 content/en/docs/Developerguide/testing-a-parser.md create mode 100644 content/en/docs/Developerguide/testing-and-debugging-text-search.md create mode 100644 content/en/docs/Developerguide/text-search-functions-and-operators.md create mode 100644 content/en/docs/Developerguide/text-search-types.md create mode 100644 
content/en/docs/Developerguide/thesaurus-dictionary.md create mode 100644 content/en/docs/Developerguide/thread_wait_status.md create mode 100644 content/en/docs/Developerguide/tpc-c-benchmark.md create mode 100644 content/en/docs/Developerguide/transaction-isolation.md create mode 100644 content/en/docs/Developerguide/transaction.md create mode 100644 content/en/docs/Developerguide/transactions_prepared_xacts.md create mode 100644 content/en/docs/Developerguide/trigger-functions.md create mode 100644 content/en/docs/Developerguide/troubleshooting-14.md create mode 100644 content/en/docs/Developerguide/troubleshooting.md create mode 100644 content/en/docs/Developerguide/truncate.md create mode 100644 content/en/docs/Developerguide/tuning-process.md create mode 100644 content/en/docs/Developerguide/type-conversion-functions.md create mode 100644 content/en/docs/Developerguide/type-conversion.md create mode 100644 content/en/docs/Developerguide/typical-sql-optimization-methods.md create mode 100644 content/en/docs/Developerguide/union-case-and-related-constructs.md create mode 100644 content/en/docs/Developerguide/universal-file-access-functions.md create mode 100644 content/en/docs/Developerguide/unsupported-data-types.md create mode 100644 content/en/docs/Developerguide/unsupported-dmls.md create mode 100644 content/en/docs/Developerguide/unsupported-features.md create mode 100644 content/en/docs/Developerguide/unsupported-index-ddls-and-index.md create mode 100644 content/en/docs/Developerguide/unsupported-queries-for-native-compilation-and-lite-execution.md create mode 100644 content/en/docs/Developerguide/unsupported-table-ddls.md create mode 100644 content/en/docs/Developerguide/update.md create mode 100644 content/en/docs/Developerguide/updating-a-table-by-using-dml-statements.md create mode 100644 content/en/docs/Developerguide/updating-and-inserting-data-by-using-the-merge-into-statement.md create mode 100644 content/en/docs/Developerguide/updating-data-in-a-table-4.md create mode 100644 content/en/docs/Developerguide/updating-data-in-a-table.md create mode 100644 content/en/docs/Developerguide/updating-statistics.md create mode 100644 content/en/docs/Developerguide/upgrade-parameters.md create mode 100644 content/en/docs/Developerguide/usage-guide-11.md create mode 100644 content/en/docs/Developerguide/usage-guide-9.md create mode 100644 content/en/docs/Developerguide/usage-guide.md create mode 100644 content/en/docs/Developerguide/usage-scenarios.md create mode 100644 content/en/docs/Developerguide/usage.md create mode 100644 content/en/docs/Developerguide/user-and-permission-audit.md create mode 100644 content/en/docs/Developerguide/user-defined-functions.md create mode 100644 content/en/docs/Developerguide/user_login.md create mode 100644 content/en/docs/Developerguide/users.md create mode 100644 content/en/docs/Developerguide/using-a-gsql-meta-command-to-import-data.md create mode 100644 content/en/docs/Developerguide/using-csv-log-output.md create mode 100644 content/en/docs/Developerguide/using-gs_dump-and-gs_dumpall-to-export-data.md create mode 100644 content/en/docs/Developerguide/using-gs_restore-to-import-data.md create mode 100644 content/en/docs/Developerguide/using-gsql-to-connect-to-a-database.md create mode 100644 content/en/docs/Developerguide/using-mot.md create mode 100644 content/en/docs/Developerguide/using-partitioned-tables.md create mode 100644 content/en/docs/Developerguide/using-pcks.md create mode 100644 content/en/docs/Developerguide/utility.md create 
mode 100644 content/en/docs/Developerguide/uuid-type.md create mode 100644 content/en/docs/Developerguide/vacuum-33.md create mode 100644 content/en/docs/Developerguide/vacuum.md create mode 100644 content/en/docs/Developerguide/value-storage.md create mode 100644 content/en/docs/Developerguide/values.md create mode 100644 content/en/docs/Developerguide/version-and-platform-compatibility.md create mode 100644 content/en/docs/Developerguide/viewing-data.md create mode 100644 content/en/docs/Developerguide/viewing-parameter-values.md create mode 100644 content/en/docs/Developerguide/voltdb.md create mode 100644 content/en/docs/Developerguide/wait-events-27.md create mode 100644 content/en/docs/Developerguide/wait-events-42.md create mode 100644 content/en/docs/Developerguide/wait-events.md create mode 100644 content/en/docs/Developerguide/wait_events.md create mode 100644 content/en/docs/Developerguide/wdr-snapshot-data-table.md create mode 100644 content/en/docs/Developerguide/wdr-snapshot-schema.md create mode 100644 content/en/docs/Developerguide/what-is-a-document.md create mode 100644 content/en/docs/Developerguide/when-the-tpc-c-is-running-and-a-disk-to-be-injected-is-full-the-tpc-c-stops-responding.md create mode 100644 content/en/docs/Developerguide/window-functions.md create mode 100644 content/en/docs/Developerguide/wlm_user_resource_config.md create mode 100644 content/en/docs/Developerguide/wlm_user_resource_runtime.md create mode 100644 content/en/docs/Developerguide/workflow-overview.md create mode 100644 content/en/docs/Developerguide/working-with-databases.md create mode 100644 content/en/docs/Developerguide/workload-manager.md create mode 100644 content/en/docs/Developerguide/workload.md create mode 100644 content/en/docs/Developerguide/workload_sql_count.md create mode 100644 content/en/docs/Developerguide/workload_sql_elapse_time.md create mode 100644 content/en/docs/Developerguide/workload_transaction.md create mode 100644 content/en/docs/Developerguide/write-ahead-log.md create mode 100644 content/en/docs/Developerguide/write-and-read-write-operations.md create mode 100644 content/en/docs/Developerguide/x-tuner.md create mode 100644 content/en/docs/Developerguide/zone-and-formatting.md create mode 100644 content/en/docs/Glossary/Glossary.md create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Glossary/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Quickstart/(optional)-setting-the-standby-node-to-readable.md create mode 100644 content/en/docs/Quickstart/Quickstart.md create mode 100644 content/en/docs/Quickstart/alarm-detection.md create mode 100644 content/en/docs/Quickstart/archiving.md create mode 100644 content/en/docs/Quickstart/asynchronous-i-o-operations.md create mode 100644 content/en/docs/Quickstart/audit-switch.md create mode 100644 content/en/docs/Quickstart/auditing.md create mode 100644 content/en/docs/Quickstart/automatic-vacuuming.md create mode 100644 content/en/docs/Quickstart/background-writer.md create mode 100644 content/en/docs/Quickstart/checking-the-health-status.md create mode 100644 content/en/docs/Quickstart/checkpoints.md create mode 100644 
content/en/docs/Quickstart/communication-library-parameters.md create mode 100644 content/en/docs/Quickstart/compatibility-with-earlier-versions.md create mode 100644 content/en/docs/Quickstart/configuration-file-for-primary-secondary-deployment.md create mode 100644 content/en/docs/Quickstart/configuration-file-for-single-instance-deployment.md create mode 100644 content/en/docs/Quickstart/configuring-a-whitelist-using-gs_guc.md create mode 100644 content/en/docs/Quickstart/configuring-opengauss-parameters.md create mode 100644 content/en/docs/Quickstart/configuring-os-parameters.md create mode 100644 content/en/docs/Quickstart/configuring-parameters-in-configuration-files.md create mode 100644 content/en/docs/Quickstart/configuring-primary-database-node-information.md create mode 100644 content/en/docs/Quickstart/configuring-running-parameters.md create mode 100644 content/en/docs/Quickstart/configuring-the-basic-host-information.md create mode 100644 content/en/docs/Quickstart/configuring-the-database-name-and-directories.md create mode 100644 content/en/docs/Quickstart/configuring-the-locale-and-character-set.md create mode 100644 content/en/docs/Quickstart/confirming-connection-information.md create mode 100644 content/en/docs/Quickstart/connecting-to-a-database-locally.md create mode 100644 content/en/docs/Quickstart/connecting-to-a-database-remotely.md create mode 100644 content/en/docs/Quickstart/connecting-to-a-database.md create mode 100644 content/en/docs/Quickstart/connection-and-authentication.md create mode 100644 content/en/docs/Quickstart/connection-pool-parameters.md create mode 100644 content/en/docs/Quickstart/connection-settings.md create mode 100644 content/en/docs/Quickstart/cost-based-vacuum-delay.md create mode 100644 content/en/docs/Quickstart/creating-a-configuration-file.md create mode 100644 content/en/docs/Quickstart/creating-a-database.md create mode 100644 content/en/docs/Quickstart/creating-a-role.md create mode 100644 content/en/docs/Quickstart/creating-a-table.md create mode 100644 content/en/docs/Quickstart/creating-a-user.md create mode 100644 content/en/docs/Quickstart/creating-the-required-user-account-and-configuring-the-installation-environment.md create mode 100644 content/en/docs/Quickstart/default-settings-of-client-connection.md create mode 100644 content/en/docs/Quickstart/developer-options.md create mode 100644 content/en/docs/Quickstart/disabling-the-os-firewall.md create mode 100644 content/en/docs/Quickstart/disabling-the-swap-memory.md create mode 100644 content/en/docs/Quickstart/disk-space.md create mode 100644 content/en/docs/Quickstart/en-us_bookmap_0241499761.md create mode 100644 content/en/docs/Quickstart/error-reporting-and-logging.md create mode 100644 content/en/docs/Quickstart/establishing-mutual-trust-manually.md create mode 100644 content/en/docs/Quickstart/examples-0.md create mode 100644 content/en/docs/Quickstart/examples.md create mode 100644 content/en/docs/Quickstart/executing-an-sql-statement.md create mode 100644 content/en/docs/Quickstart/executing-files-in-batches.md create mode 100644 content/en/docs/Quickstart/executing-installation.md create mode 100644 content/en/docs/Quickstart/executing-sql-statements-using-jdbc.md create mode 100644 content/en/docs/Quickstart/executing-sql-statements-using-the-client-tool.md create mode 100644 content/en/docs/Quickstart/faqs.md create mode 100644 content/en/docs/Quickstart/fault-tolerance.md create mode 100644 content/en/docs/Quickstart/figures/installation-process.png create 
mode 100644 "content/en/docs/Quickstart/figures/opengauss\347\275\221\347\273\234\347\273\204\347\275\221\347\244\272\344\276\213.png" create mode 100644 "content/en/docs/Quickstart/figures/opengauss\351\200\273\350\276\221\346\236\266\346\236\204\345\233\2761.png" create mode 100644 content/en/docs/Quickstart/figures/typical-networking.png create mode 100644 content/en/docs/Quickstart/file-location.md create mode 100644 content/en/docs/Quickstart/genetic-query-optimizer.md create mode 100644 content/en/docs/Quickstart/granting-permissions.md create mode 100644 content/en/docs/Quickstart/guc-parameter-usage.md create mode 100644 content/en/docs/Quickstart/guc-parameters.md create mode 100644 content/en/docs/Quickstart/ha-replication.md create mode 100644 content/en/docs/Quickstart/initial-configuration.md create mode 100644 content/en/docs/Quickstart/initializing-the-installation-environment.md create mode 100644 content/en/docs/Quickstart/installation-process.md create mode 100644 content/en/docs/Quickstart/installation-user-and-user-group.md create mode 100644 content/en/docs/Quickstart/installing-the-gsql-client-and-connecting-to-a-database.md create mode 100644 content/en/docs/Quickstart/installing-the-opengauss.md create mode 100644 content/en/docs/Quickstart/jdbc-package-and-driver-class.md create mode 100644 content/en/docs/Quickstart/kernel-resource-usage.md create mode 100644 content/en/docs/Quickstart/learning-product-knowledge.md create mode 100644 content/en/docs/Quickstart/load-management.md create mode 100644 content/en/docs/Quickstart/loading-the-driver.md create mode 100644 content/en/docs/Quickstart/lock-management.md create mode 100644 content/en/docs/Quickstart/log-replay.md create mode 100644 content/en/docs/Quickstart/logging-content.md create mode 100644 content/en/docs/Quickstart/logging-destination.md create mode 100644 content/en/docs/Quickstart/logging-time.md create mode 100644 content/en/docs/Quickstart/memory.md create mode 100644 content/en/docs/Quickstart/miscellaneous-parameters.md create mode 100644 content/en/docs/Quickstart/modifying-os-configuration.md create mode 100644 content/en/docs/Quickstart/obtaining-and-verifying-an-installation-package.md create mode 100644 content/en/docs/Quickstart/opengauss-transaction.md create mode 100644 content/en/docs/Quickstart/operation-auditing.md create mode 100644 content/en/docs/Quickstart/optimizer-cost-constants.md create mode 100644 content/en/docs/Quickstart/optimizer-method-configuration.md create mode 100644 content/en/docs/Quickstart/other-default-parameters.md create mode 100644 content/en/docs/Quickstart/other-optimizer-options.md create mode 100644 content/en/docs/Quickstart/parallel-data-import.md create mode 100644 content/en/docs/Quickstart/performance-statistics.md create mode 100644 content/en/docs/Quickstart/platform-and-client-compatibility.md create mode 100644 content/en/docs/Quickstart/preparing-for-installation.md create mode 100644 content/en/docs/Quickstart/preparing-the-software-and-hardware-installation-environment.md create mode 100644 content/en/docs/Quickstart/primary-server.md create mode 100644 content/en/docs/Quickstart/product-features.md create mode 100644 content/en/docs/Quickstart/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Quickstart/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Quickstart/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Quickstart/public_sys-resources/icon-notice.gif create mode 100644 
content/en/docs/Quickstart/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Quickstart/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Quickstart/query-and-index-statistics-collector.md create mode 100644 content/en/docs/Quickstart/query-planning.md create mode 100644 content/en/docs/Quickstart/query.md create mode 100644 content/en/docs/Quickstart/resetting-parameters.md create mode 100644 content/en/docs/Quickstart/resource-consumption.md create mode 100644 content/en/docs/Quickstart/security-and-authentication-(postgresql-conf).md create mode 100644 content/en/docs/Quickstart/sending-server.md create mode 100644 content/en/docs/Quickstart/setting-a-client-authentication-policy.md create mode 100644 content/en/docs/Quickstart/setting-character-set-parameters.md create mode 100644 content/en/docs/Quickstart/setting-remote-login-of-user-root.md create mode 100644 content/en/docs/Quickstart/setting-the-nic-mtu.md create mode 100644 content/en/docs/Quickstart/setting-the-time-zone-and-time.md create mode 100644 content/en/docs/Quickstart/settings.md create mode 100644 content/en/docs/Quickstart/simple-data-management.md create mode 100644 content/en/docs/Quickstart/simple-permission-management.md create mode 100644 content/en/docs/Quickstart/software-and-hardware-requirements.md create mode 100644 content/en/docs/Quickstart/software-architecture.md create mode 100644 content/en/docs/Quickstart/standby-server.md create mode 100644 content/en/docs/Quickstart/statement-behavior.md create mode 100644 content/en/docs/Quickstart/statistics-during-the-database-running.md create mode 100644 content/en/docs/Quickstart/system-performance-snapshot.md create mode 100644 content/en/docs/Quickstart/typical-networking.md create mode 100644 content/en/docs/Quickstart/upgrade-parameters.md create mode 100644 content/en/docs/Quickstart/user-and-permission-audit.md create mode 100644 content/en/docs/Quickstart/using-csv-log-output.md create mode 100644 content/en/docs/Quickstart/using-opengauss.md create mode 100644 content/en/docs/Quickstart/using-the-gsql-client-for-connection.md create mode 100644 content/en/docs/Quickstart/verifying-the-installation.md create mode 100644 content/en/docs/Quickstart/version-and-platform-compatibility.md create mode 100644 content/en/docs/Quickstart/viewing-objects.md create mode 100644 content/en/docs/Quickstart/viewing-parameter-values.md create mode 100644 content/en/docs/Quickstart/wait-events.md create mode 100644 content/en/docs/Quickstart/what-should-i-do-if-mutual-trust-between-nodes-in-opengauss-is-lost.md create mode 100644 content/en/docs/Quickstart/write-ahead-log.md create mode 100644 content/en/docs/Quickstart/zone-and-formatting.md create mode 100644 content/en/docs/Releasenotes/Releasenotes.md create mode 100644 content/en/docs/Releasenotes/Terms of Use.md create mode 100644 content/en/docs/Releasenotes/acknowledgement.md create mode 100644 content/en/docs/Releasenotes/common-vulnerabilities-and-exposures.md create mode 100644 content/en/docs/Releasenotes/contribution.md create mode 100644 content/en/docs/Releasenotes/feature-introduction.md create mode 100644 content/en/docs/Releasenotes/important-notes.md create mode 100644 content/en/docs/Releasenotes/known-issues.md create mode 100644 content/en/docs/Releasenotes/new-features.md create mode 100644 content/en/docs/Releasenotes/optimized-features.md create mode 100644 content/en/docs/Releasenotes/public_sys-resources/icon-caution.gif create mode 100644 
content/en/docs/Releasenotes/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Releasenotes/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Releasenotes/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Releasenotes/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Releasenotes/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Releasenotes/resolved-issues.md create mode 100644 content/en/docs/Releasenotes/source-code.md create mode 100644 content/en/docs/Releasenotes/user-notice.md create mode 100644 content/en/docs/Releasenotes/version-introduction.md create mode 100644 content/en/docs/Technicalwhitepaper/Technicalwhitepaper.md create mode 100644 content/en/docs/Technicalwhitepaper/access-control.md create mode 100644 content/en/docs/Technicalwhitepaper/adaptive-compression.md create mode 100644 content/en/docs/Technicalwhitepaper/application-scenario.md create mode 100644 content/en/docs/Technicalwhitepaper/basic-functions-oriented-to-application-development.md create mode 100644 content/en/docs/Technicalwhitepaper/cbo-optimizer.md create mode 100644 content/en/docs/Technicalwhitepaper/common-concepts.md create mode 100644 content/en/docs/Technicalwhitepaper/copy-interface-for-error-tolerance.md create mode 100644 content/en/docs/Technicalwhitepaper/core-database-technologies.md create mode 100644 content/en/docs/Technicalwhitepaper/database-audit.md create mode 100644 content/en/docs/Technicalwhitepaper/database-encryption-authentication.md create mode 100644 content/en/docs/Technicalwhitepaper/database-security.md create mode 100644 content/en/docs/Technicalwhitepaper/deployment-modes.md create mode 100644 content/en/docs/Technicalwhitepaper/deployment-solutions.md create mode 100644 content/en/docs/Technicalwhitepaper/deployment-with-one-primary-and-multiple-standbys.md create mode 100644 content/en/docs/Technicalwhitepaper/figures/column-store.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/database-management-and-storage-network.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/en-us_image_0253069486.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/en-us_image_0253141769.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/kunpeng-numa-architecture-optimization.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/one-primary-and-multiple-standby-deployment.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/opengauss-logical-components.png create mode 100644 content/en/docs/Technicalwhitepaper/figures/primary-standby-deployment.png create mode 100644 content/en/docs/Technicalwhitepaper/glossary.md create mode 100644 content/en/docs/Technicalwhitepaper/ha.md create mode 100644 content/en/docs/Technicalwhitepaper/hardware-requirements.md create mode 100644 content/en/docs/Technicalwhitepaper/high-concurrency-of-the-thread-pool.md create mode 100644 content/en/docs/Technicalwhitepaper/high-performance.md create mode 100644 content/en/docs/Technicalwhitepaper/high-scalability.md create mode 100644 content/en/docs/Technicalwhitepaper/hybrid-row-column-storage.md create mode 100644 content/en/docs/Technicalwhitepaper/introduction-to-deployment-solutions.md create mode 100644 content/en/docs/Technicalwhitepaper/kunpeng-numa-architecture-optimization.md create mode 100644 content/en/docs/Technicalwhitepaper/logical-backup.md create mode 100644 
content/en/docs/Technicalwhitepaper/maintainability.md create mode 100644 content/en/docs/Technicalwhitepaper/network-communication-security.md create mode 100644 content/en/docs/Technicalwhitepaper/one-click-diagnosis-information-collection.md create mode 100644 content/en/docs/Technicalwhitepaper/partitioning.md create mode 100644 content/en/docs/Technicalwhitepaper/pg-interface-compatibility.md create mode 100644 content/en/docs/Technicalwhitepaper/physical-backup.md create mode 100644 content/en/docs/Technicalwhitepaper/primary-standby-deployment.md create mode 100644 content/en/docs/Technicalwhitepaper/primary-standby.md create mode 100644 content/en/docs/Technicalwhitepaper/product-positioning.md create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Technicalwhitepaper/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Technicalwhitepaper/row-level-access-control.md create mode 100644 content/en/docs/Technicalwhitepaper/separation-of-control-and-access-permissions.md create mode 100644 content/en/docs/Technicalwhitepaper/slow-sql-diagnosis.md create mode 100644 content/en/docs/Technicalwhitepaper/software-and-hardware-requirements.md create mode 100644 content/en/docs/Technicalwhitepaper/software-architecture.md create mode 100644 content/en/docs/Technicalwhitepaper/software-requirements.md create mode 100644 content/en/docs/Technicalwhitepaper/sql-bypass.md create mode 100644 content/en/docs/Technicalwhitepaper/sql-hints.md create mode 100644 content/en/docs/Technicalwhitepaper/standalone-deployment.md create mode 100644 content/en/docs/Technicalwhitepaper/standard-development-interfaces.md create mode 100644 content/en/docs/Technicalwhitepaper/standard-sql.md create mode 100644 content/en/docs/Technicalwhitepaper/support-for-functions-and-stored-procedures.md create mode 100644 content/en/docs/Technicalwhitepaper/technical-characteristics.md create mode 100644 content/en/docs/Technicalwhitepaper/technical-specifications.md create mode 100644 content/en/docs/Technicalwhitepaper/transaction-support.md create mode 100644 content/en/docs/Technicalwhitepaper/typical-networking.md create mode 100644 content/en/docs/Technicalwhitepaper/workload-diagnosis-report.md create mode 100644 content/en/docs/Toolreference/Toolreference.md create mode 100644 content/en/docs/Toolreference/an-error-is-reported-displaying-failed-to-obtain-the-gphome-when-a-command-is-executed.md create mode 100644 content/en/docs/Toolreference/client-tool.md create mode 100644 content/en/docs/Toolreference/command-reference-1.md create mode 100644 content/en/docs/Toolreference/command-reference.md create mode 100644 content/en/docs/Toolreference/faqs-2.md create mode 100644 content/en/docs/Toolreference/faqs.md create mode 100644 content/en/docs/Toolreference/gaussdb.md create mode 100644 content/en/docs/Toolreference/gs_backup.md create mode 100644 content/en/docs/Toolreference/gs_basebackup.md create mode 100644 content/en/docs/Toolreference/gs_check.md create mode 100644 content/en/docs/Toolreference/gs_checkos.md create mode 100644 
content/en/docs/Toolreference/gs_checkperf.md create mode 100644 content/en/docs/Toolreference/gs_collector.md create mode 100644 content/en/docs/Toolreference/gs_ctl.md create mode 100644 content/en/docs/Toolreference/gs_dump.md create mode 100644 content/en/docs/Toolreference/gs_dumpall.md create mode 100644 content/en/docs/Toolreference/gs_guc.md create mode 100644 content/en/docs/Toolreference/gs_initdb.md create mode 100644 content/en/docs/Toolreference/gs_install.md create mode 100644 content/en/docs/Toolreference/gs_om.md create mode 100644 content/en/docs/Toolreference/gs_postuninstall.md create mode 100644 content/en/docs/Toolreference/gs_preinstall.md create mode 100644 content/en/docs/Toolreference/gs_restore.md create mode 100644 content/en/docs/Toolreference/gs_ssh.md create mode 100644 content/en/docs/Toolreference/gs_sshexkey.md create mode 100644 content/en/docs/Toolreference/gs_uninstall.md create mode 100644 content/en/docs/Toolreference/gs_upgradectl.md create mode 100644 content/en/docs/Toolreference/gsql.md create mode 100644 content/en/docs/Toolreference/gstrace.md create mode 100644 content/en/docs/Toolreference/kadmin-local.md create mode 100644 content/en/docs/Toolreference/kdb5_util.md create mode 100644 content/en/docs/Toolreference/kdestroy.md create mode 100644 content/en/docs/Toolreference/kinit.md create mode 100644 content/en/docs/Toolreference/klist.md create mode 100644 content/en/docs/Toolreference/krb5kdc.md create mode 100644 content/en/docs/Toolreference/meta-command-reference.md create mode 100644 content/en/docs/Toolreference/obtaining-help-information.md create mode 100644 content/en/docs/Toolreference/overview-0.md create mode 100644 content/en/docs/Toolreference/overview.md create mode 100644 content/en/docs/Toolreference/pg_config.md create mode 100644 content/en/docs/Toolreference/pg_controldata.md create mode 100644 content/en/docs/Toolreference/pg_resetxlog.md create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/Toolreference/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/Toolreference/restoration-method-for-incomplete-key-files-caused-by-interruption-during-standby-instance-rebuildin.md create mode 100644 content/en/docs/Toolreference/server-tools.md create mode 100644 content/en/docs/Toolreference/system-catalogs-and-views-supported-by-gs_collector.md create mode 100644 content/en/docs/Toolreference/tool-overview.md create mode 100644 content/en/docs/Toolreference/tools-used-in-the-internal-system.md create mode 100644 content/en/docs/Toolreference/usage-guide.md create mode 100644 content/en/docs/Toolreference/usage-guidelines.md create mode 100644 content/en/docs/installation/(optional)-setting-the-standby-node-to-readable.md create mode 100644 content/en/docs/installation/checking-the-health-status.md create mode 100644 content/en/docs/installation/configuration-file-for-primary-secondary-deployment.md create mode 100644 content/en/docs/installation/configuration-file-for-single-instance-deployment.md create mode 100644 content/en/docs/installation/configuring-os-parameters.md create mode 100644 
content/en/docs/installation/configuring-primary-database-node-information.md create mode 100644 content/en/docs/installation/configuring-the-basic-host-information.md create mode 100644 content/en/docs/installation/configuring-the-database-name-and-directories.md create mode 100644 content/en/docs/installation/configuring-the-locale-and-character-set.md create mode 100644 content/en/docs/installation/creating-a-configuration-file.md create mode 100644 content/en/docs/installation/creating-the-required-user-account-and-configuring-the-installation-environment.md create mode 100644 content/en/docs/installation/deleting-cluster-configurations.md create mode 100644 content/en/docs/installation/disabling-the-os-firewall.md create mode 100644 content/en/docs/installation/disabling-the-swap-memory.md create mode 100644 content/en/docs/installation/establishing-mutual-trust-manually.md create mode 100644 content/en/docs/installation/examples.md create mode 100644 content/en/docs/installation/executing-installation.md create mode 100644 content/en/docs/installation/executing-uninstallation.md create mode 100644 content/en/docs/installation/figures/installation-process.png create mode 100644 content/en/docs/installation/initial-configuration.md create mode 100644 content/en/docs/installation/initializing-the-installation-environment.md create mode 100644 content/en/docs/installation/installation-overview.md create mode 100644 content/en/docs/installation/installation-process.md create mode 100644 content/en/docs/installation/installation-user-and-user-group.md create mode 100644 content/en/docs/installation/installing-the-opengauss.md create mode 100644 content/en/docs/installation/modifying-os-configuration.md create mode 100644 content/en/docs/installation/obtaining-and-verifying-an-installation-package.md create mode 100644 content/en/docs/installation/preparing-for-installation.md create mode 100644 content/en/docs/installation/preparing-the-software-and-hardware-installation-environment.md create mode 100644 content/en/docs/installation/public_sys-resources/icon-caution.gif create mode 100644 content/en/docs/installation/public_sys-resources/icon-danger.gif create mode 100644 content/en/docs/installation/public_sys-resources/icon-note.gif create mode 100644 content/en/docs/installation/public_sys-resources/icon-notice.gif create mode 100644 content/en/docs/installation/public_sys-resources/icon-tip.gif create mode 100644 content/en/docs/installation/public_sys-resources/icon-warning.gif create mode 100644 content/en/docs/installation/setting-character-set-parameters.md create mode 100644 content/en/docs/installation/setting-remote-login-of-user-root.md create mode 100644 content/en/docs/installation/setting-the-nic-mtu.md create mode 100644 content/en/docs/installation/setting-the-time-zone-and-time.md create mode 100644 content/en/docs/installation/software-and-hardware-requirements.md create mode 100644 content/en/docs/installation/uninstalling-the-opengauss.md create mode 100644 content/en/docs/installation/verifying-the-installation.md create mode 100644 "content/zh/docs/Developerguide/ARM\346\234\215\345\212\241\345\231\250\344\274\230\345\214\226\346\214\207\345\257\274.md" create mode 100644 "content/zh/docs/Developerguide/MOT\351\231\220\345\210\266.md" create mode 100644 content/zh/docs/Developerguide/Query-24.md create mode 100644 "content/zh/docs/Developerguide/X86\346\234\215\345\212\241\345\231\250\344\274\230\345\214\226\346\214\207\345\257\274.md" create mode 100644 
"content/zh/docs/Developerguide/figures/openGauss\344\270\255\347\232\204\345\206\205\345\255\230\345\274\225\346\223\216.png" create mode 100644 "content/zh/docs/Developerguide/\344\273\213\347\273\215-19.md" create mode 100644 "content/zh/docs/Developerguide/\345\206\205\345\255\230-23.md" create mode 100644 "content/zh/docs/Developerguide/\345\206\205\345\255\230\350\241\250\344\275\277\347\224\250\346\214\207\345\257\274.md" create mode 100644 "content/zh/docs/Developerguide/\345\206\205\345\255\230\350\241\250\345\205\270\345\236\213\345\272\224\347\224\250.md" create mode 100644 "content/zh/docs/Developerguide/\345\206\205\345\255\230\350\241\250\346\212\200\346\234\257\344\273\213\347\273\215.md" create mode 100644 "content/zh/docs/Developerguide/\345\206\205\345\255\230\350\241\250\347\211\271\346\200\247.md" create mode 100644 "content/zh/docs/Developerguide/\345\210\233\345\273\272-\345\210\240\351\231\244\345\206\205\345\255\230\350\241\250\345\222\214\347\264\242\345\274\225.md" create mode 100644 "content/zh/docs/Developerguide/\345\255\230\345\202\250\350\277\207\347\250\213-20.md" create mode 100644 "content/zh/docs/Developerguide/\345\270\270\350\247\201\351\227\256\351\242\230\345\244\204\347\220\206-17.md" create mode 100644 "content/zh/docs/Developerguide/\346\225\260\346\215\256\347\261\273\345\236\213-21.md" create mode 100644 "content/zh/docs/Developerguide/\346\246\202\350\277\260-18.md" create mode 100644 "content/zh/docs/Developerguide/\350\260\203\350\257\225-22.md" create mode 100644 "content/zh/docs/Developerguide/\351\203\250\347\275\262.md" create mode 100644 "content/zh/docs/Developerguide/\351\205\215\347\275\256\345\207\206\345\244\207.md" create mode 100644 "content/zh/docs/Quickstart/\345\206\205\345\255\230\350\241\250.md" diff --git a/content/en/docs.lnk b/content/en/docs.lnk new file mode 100644 index 0000000000000000000000000000000000000000..1779c367a67f42d9b23fa78eeeb15cb8df6a4c8e GIT binary patch literal 890 zcmeZaU|?VrVFHp23ViDak0pARq9+|AN9Rt4x7KH#iuy4GiW=GU&Tm#lRII7=8?fV9t#I?kE9<@cg1| z1_mnzP6if+_xtt)oRhj6pi`pdpVUq+y3edD<2B0<+)4Zb5O}i1$76miy_atnlyU!a>?Vws(mn8~nh!QTz>Rsw$g{bm=Pj~z@-OKEqP-StXv z4oiW|^XX@q3CYI;0|*%63w(iQ8wLjizctu<%bB6meff_I!b^(NltAuJ0a+{$#Qb0m zB=8g%G#K1~(HO%3iyTdmGBBtCk|4tjKB{hi?=B^<^dnP3&%D=bWP##bAQd3PSb-Q3 z6OhmWS-{``BJ4E4DN+*1WC8I(IzWu})Ng*4kr35 + +After the audit function is enabled, a large number of audit logs will be generated, which occupy large storage space. You can customize an audit log maintenance policy based on the size of available storage space. + +For details, see "Database Security Management \> Configuring Database Audit \> Maintaining Audit Logs" in the _Developer Guide_. 
+ diff --git a/content/en/docs/Administratorguide/backup-and-restoration.md b/content/en/docs/Administratorguide/backup-and-restoration.md new file mode 100644 index 000000000..be708caf9 --- /dev/null +++ b/content/en/docs/Administratorguide/backup-and-restoration.md @@ -0,0 +1,9 @@ +# Backup and Restoration + +- **[Overview](overview.md)** + +- **[Physical Backup and Restoration](physical-backup-and-restoration.md)** + +- **[Logical Backup and Restoration](logical-backup-and-restoration.md)** + + diff --git a/content/en/docs/Administratorguide/check-method-0.md b/content/en/docs/Administratorguide/check-method-0.md new file mode 100644 index 000000000..09d7f31a8 --- /dev/null +++ b/content/en/docs/Administratorguide/check-method-0.md @@ -0,0 +1,350 @@ +# Check Method + +Use the **gs\_check** tool provided by openGauss to check the openGauss health status. + +## Precautions + +- Only user **root** is authorized to check new nodes added during cluster scale-out. In other cases, the check can be performed only by user **omm**. +- The **-i** or **-e** parameter must be specified. **-i** specifies one or more check items, and **-e** specifies an inspection scenario in which multiple items are checked. +- If the items specified by **-i** are not **root** check items, or the scenario specified by **-e** contains no **root** check items, you do not need to enter the name or password of user **root**. +- You can specify **--skip-root-items** to skip **root** check items. +- To check the consistency between a new node and the existing nodes, run the **gs\_check** command on an existing node with the **--hosts** parameter, and write the IP address of the new node into the **hosts** file. + +## Procedure + +Method 1: + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to check the openGauss database status: + + ``` + gs_check -i CheckClusterState + ``` + + In the command, **-i** indicates the check item and is case-sensitive. The format is **-i CheckClusterState**, **-i CheckCPU**, or **-i CheckClusterState,CheckCPU**. + + Checkable items are listed in "Server Tools \> gs\_check \> openGauss status checks" in the _openGauss Tool Reference_. You can create a check item as needed. + + +Method 2: + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to check the openGauss database health status: + + ``` + gs_check -e inspect + ``` + + In the command, **-e** indicates the inspection scenario and is case-sensitive. The format is **-e inspect** or **-e upgrade**. + + The inspection scenarios include **inspect** \(routine inspection\), **upgrade** \(inspection before upgrade\), **expand** \(inspection before cluster scale-out\), **binary\_upgrade** \(inspection before in-place upgrade\), and **health** \(health inspection\). You can create an inspection scenario as needed. + + +Method 3: + +1. Log in as the OS user **omm** to the primary node of the database. +2. Copy the inspection tool **gs\_check** and the **inspection** directory to all new hosts. +3. Write the IP addresses of the new hosts into the **ipListFile** file, one IP address per line \(see the example after this procedure\). +4. Run the following command to check the new nodes before cluster scale-out: + + ``` + gs_check -e expand_new_node --hosts ipListFile + ``` + + **-e** must be set to **expand\_new\_node**, indicating inspection before cluster scale-out.
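The **ipListFile** file referenced in step 3 of Method 3 is a plain-text list with one IP address per line. The following is a minimal sketch; the IP addresses are placeholders for the new hosts in the actual environment.

```
cat > ipListFile << EOF
10.10.0.11
10.10.0.12
EOF
```

The file is then passed to **gs\_check** through the **--hosts** parameter, as shown in step 4.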
+ + +The openGauss inspection is performed to check openGauss status during openGauss running or to check the environment and conditions before critical operations, such as upgrade or scale-out. For details about the inspection items and scenarios, see "Server Tools \> gs\_check \> openGauss status checks" in the _openGauss Tool Reference_. + +## Examples + +Check result of a single item: + +``` +perfadm@lfgp000700749:/opt/huawei/perfadm/tool/script> gs_check -i CheckCPU +Parsing the check items config file successfully +Distribute the context file to remote hosts successfully +Start to health check for the cluster. Total Items:1 Nodes:3 + +Checking... [=========================] 1/1 +Start to analysis the check result +CheckCPU....................................OK +The item run on 3 nodes. success: 3 + +Analysis the check result successfully +Success. All check items run completed. Total:1 Success:1 Failed:0 +For more information please refer to /opt/huawei/wisequery/script/gspylib/inspection/output/CheckReport_201902193704661604.tar.gz +``` + +Local execution result: + +``` +perfadm@lfgp000700749:/opt/huawei/perfadm/tool/script> gs_check -i CheckCPU -L + +2017-12-29 17:09:29 [NAM] CheckCPU +2017-12-29 17:09:29 [STD] Check the CPU usage of the host. If the value of idle is greater than 30% and the value of iowait is less than 30%, this item passes the check. Otherwise, this item fails the check. +2017-12-29 17:09:29 [RST] OK + +2017-12-29 17:09:29 [RAW] +Linux 4.4.21-69-default (lfgp000700749) 12/29/17 _x86_64_ + +17:09:24 CPU %user %nice %system %iowait %steal %idle +17:09:25 all 0.25 0.00 0.25 0.00 0.00 99.50 +17:09:26 all 0.25 0.00 0.13 0.00 0.00 99.62 +17:09:27 all 0.25 0.00 0.25 0.13 0.00 99.37 +17:09:28 all 0.38 0.00 0.25 0.00 0.13 99.25 +17:09:29 all 1.00 0.00 0.88 0.00 0.00 98.12 +Average: all 0.43 0.00 0.35 0.03 0.03 99.17 +``` + +Check result of a scenario: + +``` +[perfadm@SIA1000131072 Check]$ gs_check -e inspect +Parsing the check items config file successfully +The below items require root privileges to execute:[CheckBlockdev CheckIOrequestqueue CheckIOConfigure CheckCheckMultiQueue CheckFirewall CheckSshdService CheckSshdConfig CheckCrondService CheckNoCheckSum CheckSctpSeProcMemory CheckBootItems CheckFilehandle CheckNICModel CheckDropCache] +Please enter root privileges user[root]:root +Please enter password for user[root]: +Please enter password for user[root] on the node[10.244.57.240]: +Check root password connection successfully +Distribute the context file to remote hosts successfully +Start to health check for the cluster. Total Items:59 Nodes:2 + +Checking... [ ] 21/59 +Checking... [=========================] 59/59 +Start to analysis the check result +CheckClusterState...........................OK +The item run on 2 nodes. success: 2 + +CheckDBParams...............................OK +The item run on 1 nodes. success: 1 + +CheckDebugSwitch............................OK +The item run on 2 nodes. success: 2 + +CheckDirPermissions.........................OK +The item run on 2 nodes. success: 2 + +CheckReadonlyMode...........................OK +The item run on 1 nodes. success: 1 + +CheckEnvProfile.............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +GAUSSHOME /usr1/gaussdb/app +LD_LIBRARY_PATH /usr1/gaussdb/app/lib +PATH /usr1/gaussdb/app/bin + + +CheckBlockdev...............................OK +The item run on 2 nodes. success: 2 + +CheckCurConnCount...........................OK +The item run on 1 nodes. 
success: 1 + +CheckCursorNum..............................OK +The item run on 1 nodes. success: 1 + +CheckPgxcgroup..............................OK +The item run on 1 nodes. success: 1 + +CheckDiskFormat.............................OK +The item run on 2 nodes. success: 2 + +CheckSpaceUsage.............................OK +The item run on 2 nodes. success: 2 + +CheckInodeUsage.............................OK +The item run on 2 nodes. success: 2 + +CheckSwapMemory.............................OK +The item run on 2 nodes. success: 2 + +CheckLogicalBlock...........................OK +The item run on 2 nodes. success: 2 + +CheckIOrequestqueue.....................WARNING +The item run on 2 nodes. warning: 2 +The warning[host240,host157] value: +On device (vdb) 'IO Request' RealValue '256' ExpectedValue '32768' +On device (vda) 'IO Request' RealValue '256' ExpectedValue '32768' + +CheckMaxAsyIOrequests.......................OK +The item run on 2 nodes. success: 2 + +CheckIOConfigure............................OK +The item run on 2 nodes. success: 2 + +CheckMTU....................................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +1500 + +CheckPing...................................OK +The item run on 2 nodes. success: 2 + +CheckRXTX...................................NG +The item run on 2 nodes. ng: 2 +The ng[host240,host157] value: +NetWork[eth0] +RX: 256 +TX: 256 + + +CheckNetWorkDrop............................OK +The item run on 2 nodes. success: 2 + +CheckMultiQueue.............................OK +The item run on 2 nodes. success: 2 + +CheckEncoding...............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +LANG=en_US.UTF-8 + +CheckFirewall...............................OK +The item run on 2 nodes. success: 2 + +CheckKernelVer..............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +3.10.0-957.el7.x86_64 + +CheckMaxHandle..............................OK +The item run on 2 nodes. success: 2 + +CheckNTPD...................................OK +host240: NTPD service is running, 2020-06-02 17:00:28 +host157: NTPD service is running, 2020-06-02 17:00:06 + + +CheckOSVer..................................OK +host240: The current OS is centos 7.6 64bit. +host157: The current OS is centos 7.6 64bit. + + +CheckSysParams..........................WARNING +The item run on 2 nodes. warning: 2 +The warning[host240,host157] value: +Warning reason: variable 'net.ipv4.tcp_retries1' RealValue '3' ExpectedValue '5'. +Warning reason: variable 'net.ipv4.tcp_syn_retries' RealValue '6' ExpectedValue '5'. +Warning reason: variable 'net.sctp.path_max_retrans' RealValue '5' ExpectedValue '10'. +Warning reason: variable 'net.sctp.max_init_retransmits' RealValue '8' ExpectedValue '10'. + + +CheckTHP....................................OK +The item run on 2 nodes. success: 2 + +CheckTimeZone...............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: ++0800 + +CheckCPU....................................OK +The item run on 2 nodes. success: 2 + +CheckSshdService............................OK +The item run on 2 nodes. success: 2 + +CheckSshdConfig.........................WARNING +The item run on 2 nodes. warning: 2 +The warning[host240,host157] value: + +Warning reason: UseDNS parameter is not set; expected: no + +CheckCrondService...........................OK +The item run on 2 nodes. 
success: 2 + +CheckStack..................................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +8192 + +CheckNoCheckSum.............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +Nochecksum value is N,Check items pass. + +CheckSysPortRange...........................OK +The item run on 2 nodes. success: 2 + +CheckMemInfo................................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +totalMem: 31.260929107666016G + +CheckHyperThread............................OK +The item run on 2 nodes. success: 2 + +CheckTableSpace.............................OK +The item run on 1 nodes. success: 1 + +CheckSctpService............................OK +The item run on 2 nodes. success: 2 + +CheckSysadminUser...........................OK +The item run on 1 nodes. success: 1 + +CheckGUCConsistent..........................OK +All DN instance guc value is consistent. + +CheckMaxProcMemory..........................OK +The item run on 1 nodes. success: 1 + +CheckBootItems..............................OK +The item run on 2 nodes. success: 2 + +CheckHashIndex..............................OK +The item run on 1 nodes. success: 1 + +CheckPgxcRedistb............................OK +The item run on 1 nodes. success: 1 + +CheckNodeGroupName..........................OK +The item run on 1 nodes. success: 1 + +CheckTDDate.................................OK +The item run on 1 nodes. success: 1 + +CheckDilateSysTab...........................OK +The item run on 1 nodes. success: 1 + +CheckKeyProAdj..............................OK +The item run on 2 nodes. success: 2 + +CheckProStartTime.......................WARNING +host157: +STARTED COMMAND +Tue Jun 2 16:57:18 2020 /usr1/dmuser/dmserver/metricdb1/server/bin/gaussdb --single_node -D /usr1/dmuser/dmb1/data -p 22204 +Mon Jun 1 16:15:15 2020 /usr1/gaussdb/app/bin/gaussdb -D /usr1/gaussdb/data/dn1 -M standby + + +CheckFilehandle.............................OK +The item run on 2 nodes. success: 2 + +CheckRouting................................OK +The item run on 2 nodes. success: 2 + +CheckNICModel...............................OK +The item run on 2 nodes. success: 2 (consistent) +The success on all nodes value: +version: 1.0.0 +model: Red Hat, Inc. Virtio network device + + +CheckDropCache..........................WARNING +The item run on 2 nodes. warning: 2 +The warning[host240,host157] value: +No DropCache process is running + +CheckMpprcFile..............................NG +The item run on 2 nodes. ng: 2 +The ng[host240,host157] value: +There is no mpprc file + +Analysis the check result successfully +Failed. All check items run completed. Total:59 Success:52 Warning:5 NG:2 +For more information please refer to /usr1/gaussdb/tool/script/gspylib/inspection/output/CheckReport_inspect611.tar.gz + +``` + diff --git a/content/en/docs/Administratorguide/check-method-2.md b/content/en/docs/Administratorguide/check-method-2.md new file mode 100644 index 000000000..91c4f5576 --- /dev/null +++ b/content/en/docs/Administratorguide/check-method-2.md @@ -0,0 +1,40 @@ +# Check Method + +Use the **gs\_checkperf** tool provided by openGauss to check hardware performance. + +## Prerequisites + +- openGauss is running properly. +- Services are running properly on the database. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. 
Run the following command to check the openGauss database performance: + + ``` + gs_checkperf + ``` + + +For details about performance statistical items, see "Server Tools \> gs\_checkperf \> Performance Check Items" in the _openGauss Tool Reference_. + +## Examples + +Simple performance statistical result is displayed on the screen as follows: + +``` +gs_checkperf -i pmk -U omm +Cluster statistics information: + Host CPU busy time ratio : 1.43 % + MPPDB CPU time % in busy time : 1.88 % + Shared Buffer Hit ratio : 99.96 % + In-memory sort ratio : 100.00 % + Physical Reads : 4 + Physical Writes : 25 + DB size : 70 MB + Total Physical writes : 25 + Active SQL count : 2 + Session count : 3 +``` + diff --git a/content/en/docs/Administratorguide/check-method.md b/content/en/docs/Administratorguide/check-method.md new file mode 100644 index 000000000..8af1051f9 --- /dev/null +++ b/content/en/docs/Administratorguide/check-method.md @@ -0,0 +1,60 @@ +# Check Method + +Use the **gs\_checkos** tool provided by openGauss to check the OS status. + +## Prerequisites + +- The hardware and network are working properly. +- The trust relationship of user **root** among the hosts is normal. +- Only user **root** is authorized to run the **gs\_checkos** command. + +## Procedure + +1. Log in to a server as user **root**. +2. Run the following command to check OS parameters of servers where the openGauss nodes are deployed: + + ``` + gs_checkos -i A + ``` + + Check the OS parameters to ensure that openGauss has passed the pre-installation check and can efficiently operate after it is installed. For details about the check items, see "Server Tools \> gs\_checkos" in the _openGauss Tool Reference_. + + +## Examples + +Before running the **gs\_checkos** command, execute pre-processing scripts by running **gs\_preinstall** to prepare the environment. The following uses parameter **A** as an example: + +``` +gs_checkos -i A +Checking items: + A1. [ OS version status ] : Normal + A2. [ Kernel version status ] : Normal + A3. [ Unicode status ] : Normal + A4. [ Time zone status ] : Normal + A5. [ Swap memory status ] : Normal + A6. [ System control parameters status ] : Normal + A7. [ File system configuration status ] : Normal + A8. [ Disk configuration status ] : Normal + A9. [ Pre-read block size status ] : Normal + A10.[ IO scheduler status ] : Normal + A11.[ Network card configuration status ] : Normal + A12.[ Time consistency status ] : Warning + A13.[ Firewall service status ] : Normal + A14.[ THP service status ] : Normal +Total numbers:14. Abnormal numbers:0. Warning number:1. +``` + +The following uses parameter **B** as an example: + +``` +gs_checkos -i B +Setting items: + B1. [ Set system control parameters ] : Normal + B2. [ Set file system configuration value ] : Normal + B3. [ Set pre-read block size value ] : Normal + B4. [ Set IO scheduler value ] : Normal + B5. [ Set network card configuration value ] : Normal + B6. [ Set THP service ] : Normal +Total numbers:6. Abnormal numbers:0. Warning number:0. +``` + diff --git a/content/en/docs/Administratorguide/checking-and-deleting-logs.md b/content/en/docs/Administratorguide/checking-and-deleting-logs.md new file mode 100644 index 000000000..02282653e --- /dev/null +++ b/content/en/docs/Administratorguide/checking-and-deleting-logs.md @@ -0,0 +1,11 @@ +# Checking and Deleting Logs + +You are advised to check OS logs and database run logs monthly for monitoring system status and troubleshooting, and to delete database run logs monthly for saving disk space. 
+ +- **[Checking OS Logs](checking-os-logs.md)** + +- **[Checking openGauss Run Logs](checking-opengauss-run-logs.md)** + +- **[Cleaning Run Logs](cleaning-run-logs.md)** + + diff --git a/content/en/docs/Administratorguide/checking-database-performance.md b/content/en/docs/Administratorguide/checking-database-performance.md new file mode 100644 index 000000000..408f5983f --- /dev/null +++ b/content/en/docs/Administratorguide/checking-database-performance.md @@ -0,0 +1,7 @@ +# Checking Database Performance + +- **[Check Method](check-method-2.md)** + +- **[Exception Handling](exception-handling-3.md)** + + diff --git a/content/en/docs/Administratorguide/checking-opengauss-health-status.md b/content/en/docs/Administratorguide/checking-opengauss-health-status.md new file mode 100644 index 000000000..df4c446b6 --- /dev/null +++ b/content/en/docs/Administratorguide/checking-opengauss-health-status.md @@ -0,0 +1,7 @@ +# Checking openGauss Health Status + +- **[Check Method](check-method-0.md)** + +- **[Exception Handling](exception-handling-1.md)** + + diff --git a/content/en/docs/Administratorguide/checking-opengauss-run-logs.md b/content/en/docs/Administratorguide/checking-opengauss-run-logs.md new file mode 100644 index 000000000..00d7433b0 --- /dev/null +++ b/content/en/docs/Administratorguide/checking-opengauss-run-logs.md @@ -0,0 +1,111 @@ +# Checking openGauss Run Logs + +A database can continue to run even if errors occur during some operations. However, such errors may leave data in an inconsistent state. Therefore, you are advised to check openGauss run logs monthly to detect potential problems in time. + +## Prerequisites + +- The host used for collecting logs is running properly, and the network connection is normal. Mutual trust has been established between the database installation users on all hosts. +- The OS tools required by the log collection tool \(for example, **gstack**\) have been installed. If a required tool is not installed, an error message is displayed and the corresponding collection item is skipped. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to collect database logs: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" + ``` + + In the command, **20160616 01:01** indicates the start time of the logs to be collected, and **20160616 23:59** indicates the end time. + +3. Based on the command output in [2](#en-us_topic_0237088806_en-us_topic_0059778412_l87490fc259434bc6ac7800ec9881a6ab), go to the log collection directory, decompress the collected database logs, and check them. + + Assume that the collected logs are stored in **/opt/gaussdb/tmp/gaussdba\_mppdb/collector\_20160726\_105158.tar.gz**. + + ``` + tar -xvzf /opt/gaussdb/tmp/gaussdba_mppdb/collector_20160726_105158.tar.gz + cd /opt/gaussdb/tmp/gaussdba_mppdb/collector_20160726_105158 + ``` + + +## Examples + +- Run the **gs\_collector** command together with parameters **--begin-time** and **--end-time**: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files + All results are stored in /tmp/gaussdba_mppdb/collector_20160616_175615.tar.gz.
+ ``` + +- Run the **gs\_collector** command together with parameters **--begin-time**, **--end-time**, and **-h**: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -h plat2 + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files + All results are stored in /tmp/gaussdba_mppdb/collector_20160616_190225.tar.gz. + ``` + +- Run the **gs\_collector** command together with parameters **--begin-time**, **--end-time**, and **-f**: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -f /opt/software/gaussdb/output + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files + All results are stored in /opt/software/gaussdb/output/collector_20160616_190511.tar.gz. + ``` + +- Run the **gs\_collector** command together with parameters **--begin-time**, **--end-time**, and **--keyword**: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" --keyword="os" + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files. + All results are stored in /tmp/gaussdba_mppdb/collector_20160616_190836.tar.gz. + ``` + +- Run the **gs\_collector** command together with parameters **--begin-time**, **--end-time**, and **-o**: + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -o /opt/software/gaussdb/output + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files. + All results are stored in /opt/software/gaussdb/output/collector_20160726_113711.tar.gz. + ``` + +- Run the **gs\_collector** command together with parameters **--begin-time**, **--end-time**, and **-l** \(the file name extension must be .log\): + + ``` + gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -l /opt/software/gaussdb/logfile.log + ``` + + If information similar to the following is displayed, the logs have been archived: + + ``` + Successfully collected files. + All results are stored in /opt/software/gaussdb/output/collector_20160726_113711.tar.gz. + ``` + + diff --git a/content/en/docs/Administratorguide/checking-os-logs.md b/content/en/docs/Administratorguide/checking-os-logs.md new file mode 100644 index 000000000..d8142987b --- /dev/null +++ b/content/en/docs/Administratorguide/checking-os-logs.md @@ -0,0 +1,14 @@ +# Checking OS Logs + +You are advised to monthly check OS logs to detect and prevent potential OS problems. 
+ +## Procedure + +Run the following command to check OS log files: + +``` +vim /var/log/messages +``` + +\(Pay attention to words like **kernel**, **error**, and **fatal** in logs generated within the last month and handle the problems based on the alarm information.\) + diff --git a/content/en/docs/Administratorguide/checking-os-parameters.md b/content/en/docs/Administratorguide/checking-os-parameters.md new file mode 100644 index 000000000..6e70e6b96 --- /dev/null +++ b/content/en/docs/Administratorguide/checking-os-parameters.md @@ -0,0 +1,7 @@ +# Checking OS Parameters + +- **[Check Method](check-method.md)** + +- **[Exception Handling](exception-handling.md)** + + diff --git a/content/en/docs/Administratorguide/checking-the-number-of-application-connections.md b/content/en/docs/Administratorguide/checking-the-number-of-application-connections.md new file mode 100644 index 000000000..ea50291be --- /dev/null +++ b/content/en/docs/Administratorguide/checking-the-number-of-application-connections.md @@ -0,0 +1,126 @@ +# Checking the Number of Application Connections + +If the number of connections between applications and the database exceeds the maximum value, new connections cannot be established. You are advised to daily check the number of connections, release idle connections in time, or increase the allowed maximum number of connections. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to connect to the database: + + ``` + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + If information similar to the following is displayed, the connection succeeds: + + ``` + gsql ((openGauss 1.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131 + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + postgres=# + ``` + +3. Run the following SQL statement to check the number of connections: + + ``` + postgres=# SELECT count(*) FROM (SELECT pg_stat_get_backend_idset() AS backendid) AS s; + ``` + + Information similar to the following is displayed. **2** indicates that two applications are connected to the database. + + ``` + count + ------- + 2 + (1 row) + ``` + +4. View the allowed maximum connections. + + ``` + postgres=# SHOW max_connections; + ``` + + Information similar to the following is displayed. **200** indicates the currently allowed maximum number of connections. + + ``` + max_connections + ----------------- + 200 + (1 row) + ``` + + +## Exception Handling + +If the number of connections in the command output is close to the value of **max\_connections** of the database, delete existing connections or change the upper limit based on site requirements. + +1. Run the following SQL statement to view information about connections whose **state** is set to **idle**, and **state\_change** column is not updated for a long time. 
+ + ``` + postgres=# SELECT * FROM pg_stat_activity where state='idle' order by state_change; + ``` + + Information similar to the following is displayed: + + ``` + datid | datname | pid | usesysid | usename | application_name | client_addr + | client_hostname | client_port | backend_start | xact_start | quer + y_start | state_change | waiting | enqueue | state | resource_pool + | query + -------+----------+-----------------+----------+----------+------------------+--------------- + -+-----------------+-------------+-------------------------------+------------+-------------- + -----------------+-------------------------------+---------+---------+-------+--------------- + +---------------------------------------------- + 13626 | postgres | 140390162233104 | 10 | gaussdba | | + | | -1 | 2016-07-15 14:08:59.474118+08 | | 2016-07-15 14 + :09:04.496769+08 | 2016-07-15 14:09:04.496975+08 | f | | idle | default_pool + | select count(group_name) from pgxc_group; + 13626 | postgres | 140390132872976 | 10 | gaussdba | cn_5002 | 10.180.123.163 + | | 48614 | 2016-07-15 14:11:16.014871+08 | | 2016-07-15 14 + :21:17.346045+08 | 2016-07-15 14:21:17.346095+08 | f | | idle | default_pool + | SET SESSION AUTHORIZATION DEFAULT;RESET ALL; + (2 rows) + ``` + +2. Release idle connections. + + Check each connection and release them after obtaining approval from the users of the connections. Run the following SQL command to release a connection using **pid** obtained in the previous step: + + ``` + postgres=# SELECT pg_terminate_backend(140390132872976); + ``` + + Information similar to the following is displayed: + + ``` + postgres=# SELECT pg_terminate_backend(140390132872976); + pg_terminate_backend + ---------------------- + t + (1 row) + ``` + + If no connections can be released, go to the next step. + +3. Increase the maximum number of connections. + + ``` + gs_guc set -D /gaussdb/data/dbnode -c "max_connections= 800" + ``` + + **800** is the new maximum value. + +4. Restart database services to make the new settings take effect. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The restart results in operation interruption. Properly plan the restart to avoid affecting users. + + ``` + gs_om -t stop && gs_om -t start + ``` + + diff --git a/content/en/docs/Administratorguide/checking-time-consistency.md b/content/en/docs/Administratorguide/checking-time-consistency.md new file mode 100644 index 000000000..f79fd1ea5 --- /dev/null +++ b/content/en/docs/Administratorguide/checking-time-consistency.md @@ -0,0 +1,46 @@ +# Checking Time Consistency + +Database transaction consistency is guaranteed by a logical clock and is not affected by OS time. However, OS time inconsistency will lead to problems, such as abnormal backend O&M and monitoring functions. Therefore, you are advised to monthly check time consistency among nodes. + +## Procedure + +1. Log in as the OS user **omm** to any host in the GaussDB Kernel cluster. +2. Create a configuration file containing information about nodes in the cluster. + + ``` + vim /tmp/mpphosts + ``` + + Add the host name of each node. + + ``` + plat1 + plat2 + plat3 + ``` + +3. Save the configuration file. + + ``` + :wq! + ``` + +4. Run the following command and write the time on each node into the **/tmp/sys\_ctl-os1.log** file: + + ``` + for ihost in `cat /tmp/mpphosts`; do ssh -n -q $ihost "hostname;date"; done > /tmp/sys_ctl-os1.log + ``` + +5. Check time consistency between the nodes based on the command output. The time difference should not exceed 30s. 
+ + ``` + cat /tmp/sys_ctl-os1.log + plat1 + Thu Feb 9 16:46:38 CST 2017 + plat2 + Thu Feb 9 16:46:49 CST 2017 + plat3 + Thu Feb 9 16:46:14 CST 2017 + ``` + + diff --git a/content/en/docs/Administratorguide/cleaning-run-logs.md b/content/en/docs/Administratorguide/cleaning-run-logs.md new file mode 100644 index 000000000..0ac2ab0d9 --- /dev/null +++ b/content/en/docs/Administratorguide/cleaning-run-logs.md @@ -0,0 +1,25 @@ +# Cleaning Run Logs + +A large number of run logs will be generated during database running and occupy huge disk space. You are advised to delete expired run logs and retain logs generated within one month. + +## Procedure + +1. Log in as the OS user **omm** to any host in the GaussDB Kernel cluster. +2. Clean logs. + 1. Back up logs generated over one month ago to other disks. + 2. Access the directory where logs are stored. + + ``` + cd $GAUSSLOG + ``` + + 3. Access the corresponding sub-directory and run the following command to delete logs generated one month ago: + + ``` + rm log name + ``` + + The naming convention of a log file is **postgresql-**_year_-_month_-_day_**\_HHMMSS**. + + + diff --git a/content/en/docs/Administratorguide/data-security-maintenance-suggestions.md b/content/en/docs/Administratorguide/data-security-maintenance-suggestions.md new file mode 100644 index 000000000..838e8440d --- /dev/null +++ b/content/en/docs/Administratorguide/data-security-maintenance-suggestions.md @@ -0,0 +1,23 @@ +# Data Security Maintenance Suggestions + +To ensure data security in GaussDB Kernel and prevent accidents, such as data loss and illegal data access, read this section carefully. + +## Preventing Data Loss + +You are advised to plan routine physical backup and store backup files in a reliable medium. If a serious error occurs in the system, you can use the backup files to restore the system to the state at the backup point. + +## Preventing Illegal Data Access + +- You are advised to manage database users based on their permission hierarchies. A database administrator creates users and grants permissions to the users based on service requirements to ensure users properly access the database. +- You are advised to deploy GaussDB Kernel servers and clients \(or applications developed based on the client library\) in trusted internal networks. If the servers and clients must be deployed in an untrusted network, enable SSL encryption before services are started to ensure data transmission security. Note that enabling the SSL encryption function compromises database performance. + +## Preventing System Logs from Leaking Personal Data + +- Delete personal data before sending debug logs to others for analysis. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The log level **log\_min\_messages** is set to **DEBUG**_x_ \(_x_ indicates the debug level and the value ranges from 1 to 5\). The information recorded in debug logs may contain personal data. + +- Delete personal data before sending system logs to others for analysis. If the execution of a SQL statement fails, the error SQL statement will be recorded in a system log by default. SQL statements may contain personal data. +- Set **log\_min\_error\_statement** to **PANIC** to prevent error SQL statements from being recorded in system logs. However, once the function is disabled, it is difficult to locate fault causes if faults occur. 
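+If you decide to apply this setting, one possible way to change it across openGauss is with the gs_guc tool (a sketch only; it reuses the gs_guc reload syntax shown elsewhere in this guide and assumes log_min_error_statement is tunable in your version — verify before use):
+
+```
+# Stop error SQL statements from being written to system logs; reload takes effect without a restart.
+gs_guc reload -N all -I all -c "log_min_error_statement = panic"
+```
+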
+ diff --git a/content/en/docs/Administratorguide/exception-handling-1.md b/content/en/docs/Administratorguide/exception-handling-1.md new file mode 100644 index 000000000..841adc2dc --- /dev/null +++ b/content/en/docs/Administratorguide/exception-handling-1.md @@ -0,0 +1,370 @@ +# Exception Handling + +Troubleshoot exceptions detected in the inspection by following instructions in this section. + +**Table 1** Check of openGauss running status + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Check Item | Abnormal Status | Solution |
+| --- | --- | --- |
+| CheckClusterState (Checks the openGauss status.) | openGauss or openGauss instances are not started. | Run the following command to start openGauss and instances:<br>`gs_om -t start` |
+| CheckClusterState (Checks the openGauss status.) | The status of openGauss or openGauss instances is abnormal. | Check the status of hosts and instances. Troubleshoot this issue based on the status information.<br>`gs_check -i CheckClusterState` |
+| CheckDBParams (Checks database parameters.) | Database parameters have incorrect values. | Use the gs_guc tool to set the parameters to the specified values. |
+| CheckDebugSwitch (Checks debug logs.) | The log level is incorrect. | Use the gs_guc tool to set log_min_messages to the specified content. |
+| CheckDirPermissions (Checks directory permissions.) | The permission for a directory is incorrect. | Change the directory permission to a specified value (750 or 700).<br>`chmod 750 DIR` |
+| CheckReadonlyMode (Checks the read-only mode.) | The read-only mode is enabled. | Verify that the usage of the disk where database nodes are located does not exceed the threshold (60% by default) and no other O&M operations are performed (`gs_check -i CheckDataDiskUsage`, `ps ux`). Then use the gs_guc tool to disable the read-only mode of openGauss:<br>`gs_guc reload -N all -I all -c 'default_transaction_read_only = off'` |
+| CheckEnvProfile (Checks environment variables.) | Environment variables are inconsistent. | Update the environment variable information. |
+| CheckBlockdev (Checks pre-read blocks.) | The size of a pre-read block is not 16384 KB. | Use the gs_checkos tool to set the size of the pre-read block to 16384 KB and write the setting into the auto-startup file.<br>`gs_checkos -i B3` |
+| CheckCursorNum (Checks the number of cursors.) | The number of cursors fails to be checked. | Check whether the database is properly connected and whether the openGauss status is normal. |
+| CheckPgxcgroup (Checks the data redistribution status.) | There are pgxc_group tables that have not been redistributed. | Proceed with the redistribution.<br>`gs_expand`, `gs_shrink` |
+| CheckDiskFormat (Checks disk configurations.) | Disk configurations are inconsistent between nodes. | Configure disk specifications to be consistent between nodes. |
+| CheckSpaceUsage (Checks the disk space usage.) | Disk space is insufficient. | Clear or expand the disk for the directory. |
+| CheckInodeUsage (Checks the disk index usage.) | Disk indexes are insufficient. | Clear or expand the disk for the directory. |
+| CheckSwapMemory (Checks the swap memory.) | The swap memory is greater than the physical memory. | Reduce or disable the swap memory. |
+| CheckLogicalBlock (Checks logical blocks.) | The size of a logical block is not 512 KB. | Use the gs_checkos tool to set the size of the logical block to 512 KB and write the setting into the auto-startup file.<br>`gs_checkos -i B4` |
+| CheckIOrequestqueue (Checks I/O requests.) | The requested I/O is not 32768. | Use the gs_checkos tool to set the requested I/O to 32768 and write the setting into the auto-startup file.<br>`gs_checkos -i B4` |
+| CheckCurConnCount (Checks the number of current connections.) | The number of current connections exceeds 90% of the allowed maximum number of connections. | Break idle primary database node connections. |
+| CheckMaxAsyIOrequests (Checks the maximum number of asynchronous requests.) | The maximum number of asynchronous requests is less than 104857600 or (Number of database instances on the current node x 1048576). | Use the gs_checkos tool to set the maximum number of asynchronous requests to the larger one between 104857600 and (Number of database instances on the current node x 1048576).<br>`gs_checkos -i B4` |
+| CheckMTU (Checks MTU values.) | MTU values are inconsistent between nodes. | Set the MTU value on each node to 1500 or 8192.<br>`ifconfig eth* MTU 1500` |
+| CheckIOConfigure (Checks I/O configurations.) | The I/O mode is not deadline. | Use the gs_checkos tool to set the I/O mode to deadline and write the setting into the auto-startup file.<br>`gs_checkos -i B4` |
+| CheckRXTX (Checks the RX/TX value.) | The NIC RX/TX value is not 4096. | Use the gs_checkos tool to set the NIC RX/TX value to 4096 for openGauss.<br>`gs_checkos -i B5` |
+| CheckPing (Checks whether the network connection is normal.) | There are openGauss IP addresses that cannot be pinged. | Check the network settings, network status, and firewall status between the abnormal IP addresses. |
+| CheckNetWorkDrop (Checks the network packet loss rate.) | The network packet loss rate is greater than 1%. | Check the network load and status between the corresponding IP addresses. |
+| CheckMultiQueue (Checks the NIC multi-queue function.) | Multi-queue is not enabled for the NIC, and NIC interruptions are not bound to different CPU cores. | Enable multi-queue for the NIC, and bind NIC interruptions to different CPU cores. |
+| CheckEncoding (Checks the encoding format.) | Encoding formats are inconsistent between nodes. | Write the same encoding format into /etc/profile for each node.<br>`echo "export LANG=XXX" >> /etc/profile` |
+| CheckActQryCount (Checks the archiving mode.) | The archiving mode is enabled, and the archiving directory is not under the primary database node directory. | Disable the archiving mode or set the archiving directory to be under the primary database node directory. |
+| CheckFirewall (Checks the firewall.) | The firewall is enabled. | Disable the firewall.<br>RedHat (CentOS) 7.x: `systemctl disable firewalld.service`<br>RedHat (CentOS) 6.x: `service iptables stop`<br>SUSE: `SuSEfirewall2 stop` |
+| CheckKernelVer (Checks kernel versions.) | Kernel versions are inconsistent between nodes. | Use the gs_replace tool to replace the nodes whose kernel version is inconsistent with that of the others.<br>`gs_replace` |
+| CheckMaxHandle (Checks the maximum number of file handles.) | The maximum number of handles is less than 1000000. | Set the soft and hard limits in the 91-nofile.conf or 90-nofile.conf file to 1000000.<br>`gs_checkos -i B2` |
+| CheckNTPD (Checks the time synchronization service.) | The NTPD service is disabled or the time difference is greater than 1 minute. | Enable the NTPD service and set the time to be consistent. |
+| CheckOSVer (Checks OS versions.) | Certain OSs are not supported or the OSs are not in the same hybrid list. | Use gs_replace to replace abnormal nodes with those supported by OSs or those in the same hybrid list.<br>`gs_replace` |
+| CheckSysParams (Checks OS parameters.) | OS parameter settings do not meet requirements. | Use the gs_checkos tool or manually set the parameters to values meeting the requirements.<br>`gs_checkos -i B1`<br>`vim /etc/sysctl.conf` |
+| CheckTHP (Checks the THP service.) | The THP service is disabled. | Use the gs_checkos tool to enable the THP service.<br>`gs_checkos -i B6` |
+| CheckTimeZone (Checks time zones.) | Time zones are inconsistent between nodes. | Set time zones to be consistent between nodes.<br>`cp /usr/share/zoneinfo/$primary time zone/$secondary time zone /etc/localtime` |
+| CheckCPU (Checks the CPU.) | CPU usage is high or I/O waiting time is too long. | Upgrade CPUs or improve disk performance. |
+| CheckSshdService (Checks the SSHD service.) | The SSHD service is disabled. | Enable the SSHD service and write the setting into the auto-startup file.<br>`service sshd start`<br>`echo "service sshd start" >> initFile` |
+| CheckSshdConfig (Checks SSHD configurations.) | The SSHD service is incorrectly configured. | Reconfigure the SSHD service (`PasswordAuthentication=no`, `MaxStartups=1000`, `UseDNS=yes`, `ClientAliveInterval=10800`/`ClientAliveInterval=0`), and then restart the service:<br>`service sshd start` |
+| CheckCrondService (Checks the Crond service.) | The Crond service is disabled. | Install and enable the Crond service. |
+| CheckStack (Checks the stack size.) | The stack size is less than 3072. | Use the gs_checkos tool to set the stack size to 3072 and restart the processes with a smaller stack size.<br>`gs_checkos -i B2` |
+| CheckNoCheckSum (Checks the NoCheckSum parameter.) | NoCheckSum is incorrectly set or its value is inconsistent on each node. | Set NoCheckSum to a consistent value on each node. If RedHat 6.4 or RedHat 6.5 with the NIC binding mode bond0 exists, set NoCheckSum to Y. In other cases, set it to N.<br>`echo Y > /sys/module/sctp/parameters/no_checksums` |
+| CheckSysPortRange (Checks OS port configurations.) | OS IP ports are not within the required port range or openGauss ports are within the OS IP port range. | Set the OS IP ports within 26000 to 65535 and set the openGauss ports beyond the OS IP port range.<br>`vim /etc/sysctl.conf` |
+| CheckMemInfo (Checks the memory information.) | Memory sizes are inconsistent between nodes. | Use physical memory of the same specifications between nodes. |
+| CheckHyperThread (Checks the hyper-threading.) | The CPU hyper-threading is disabled. | Enable the CPU hyper-threading. |
+| CheckTableSpace (Checks tablespaces.) | The tablespace path is nested with the openGauss path or nested with the path of another tablespace. | Migrate tablespace data to a tablespace with a valid path. |
+| CheckSctpService (Checks the SCTP service.) | The SCTP service is disabled. | Install and enable the SCTP service.<br>`modprobe sctp` |
+
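+After fixing an item, you can usually re-run just that check to confirm the result, for example (a sketch based on the gs_check invocation shown in the table; replace the item name with the one you fixed):
+
+```
+# Re-check a single inspection item, for example the openGauss status check.
+gs_check -i CheckClusterState
+```
+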
+ diff --git a/content/en/docs/Administratorguide/exception-handling-3.md b/content/en/docs/Administratorguide/exception-handling-3.md new file mode 100644 index 000000000..49c04e5b8 --- /dev/null +++ b/content/en/docs/Administratorguide/exception-handling-3.md @@ -0,0 +1,124 @@ +# Exception Handling + +After you use the **gs\_checkperf** tool to check the cluster performance, if the performance is abnormal, troubleshoot the issue by following instructions in this section. + +**Table 1** Cluster-level performance status + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Abnormal Status | Solution |
+| --- | --- |
+| High CPU usage of hosts | 1. Add high-performance CPUs, or replace current CPUs with them.<br>2. Run the `top` command to check which system processes cause high CPU usage, and run the `kill` command to stop unused processes. |
+| High CPU usage of GaussDB Kernel | 1. Add high-performance CPUs, or replace current CPUs with them.<br>2. Run the `top` command to check which database processes cause high CPU usage, and run the `kill` command to stop unused processes.<br>3. Use the gs_expand tool to add new hosts to lower the CPU usage. |
+| Low hit ratio of the shared memory | 1. Expand the memory.<br>2. Check the OS configuration file /etc/sysctl.conf (`vim /etc/sysctl.conf`) and increase the value of kernel.shmmax. |
+| Low in-memory sort ratio | Expand the memory. |
+| High I/O and disk usage | 1. Replace current disks with high-performance ones.<br>2. Adjust the data layout to evenly distribute I/O requests to all the physical disks.<br>3. Run VACUUM FULL for the entire database (`vacuum full;`).<br>4. Clean up the disk space.<br>5. Reduce the number of concurrent connections. |
+| Transaction statistics | Query the pg_stat_activity system catalog to break unnecessary connections. |
+
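+As a concrete illustration of the last row, idle sessions can be identified through pg_stat_activity and released with pg_terminate_backend, as already shown in "Checking the Number of Application Connections" (a sketch; the pid value is an example, and you should confirm with the session owners before terminating anything):
+
+```
+-- List idle sessions, oldest state change first, then release one by its pid.
+SELECT pid, usename, state, state_change FROM pg_stat_activity WHERE state = 'idle' ORDER BY state_change;
+SELECT pg_terminate_backend(140390132872976);
+```
+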
+
+**Table 2**  Node-level performance status
+

+| Abnormal Status | Solution |
+| --- | --- |
+| High CPU usage | 1. Add high-performance CPUs, or replace current CPUs with them.<br>2. Run the `top` command to check which system processes cause high CPU usage, and run the `kill` command to stop unused processes. |
+| High memory usage | Expand or clean up the memory. |
+| High I/O usage | 1. Replace current disks with high-performance ones.<br>2. Clean up the disk space.<br>3. Use memory read/write to replace as much disk I/O as possible, putting frequently accessed files or data in the memory. |
+
+
+**Table 3**  Session/process-level performance status
+

+| Abnormal Status | Solution |
+| --- | --- |
+| High CPU, memory, and I/O usage | Check which processes cause high CPU, memory, or I/O usage. If they are unnecessary processes, kill them; otherwise, analyze the specific cause of the high usage. For example, if SQL statement execution occupies much memory, check whether the SQL statements need optimization. |
+
+
+**Table 4**  SSD performance status
+

+| Abnormal Status | Solution |
+| --- | --- |
+| SSD read/write fault | Run the following command to check whether the SSD is faulty. If yes, analyze the specific cause.<br>`gs_checkperf -i SSD -U omm` |
+
+ diff --git a/content/en/docs/Administratorguide/exception-handling.md b/content/en/docs/Administratorguide/exception-handling.md new file mode 100644 index 000000000..995f64323 --- /dev/null +++ b/content/en/docs/Administratorguide/exception-handling.md @@ -0,0 +1,106 @@ +# Exception Handling + +If you use the **gs\_checkos** tool to check the OS and the command output shows **Abnormal**, run the following command to view detailed error information: + +``` +gs_checkos -i A --detail +``` + +The **Abnormal** state cannot be ignored because the OS in this state affects cluster installation. The **Warning** state does not affect cluster installation and thereby can be ignored. + +- If the check result for OS version status \(**A1**\) is **Abnormal**, replace OSs out of the mixed programming scope with those within the scope. +- If the check result for kernel version status \(**A2**\) is **Warning**, the platform kernel versions in the cluster are inconsistent. +- If the check result for Unicode status \(**A3**\) is **Abnormal**, set the same character set for all the hosts. You can add **export LANG=**_unicode_ to the **/etc/profile** file. + + ``` + vim /etc/profile + ``` + +- If the check result for time zone status \(**A4**\) is **Abnormal**, set the same time zone for all the hosts. You can copy the time zone file in the **/usr/share/zoneinfo/** directory as the **/etc/localtime** file. + + ``` + cp /usr/share/zoneinfo/$primary time zone/$secondary time zone /etc/localtime + ``` + +- If the check result for swap memory status \(**A5**\) is **Abnormal**, a possible cause is that the swap memory is larger than the physical memory. You can troubleshoot this issue by reducing the swap memory or increasing the physical memory. +- If the check result for system control parameter status \(**A6**\) is **Abnormal**, troubleshoot this issue in either of the following two ways: + - Run the following command: + + ``` + gs_checkos -i B1 + ``` + + - Modify the **/etc/sysctl.conf** file based on the error message and run **sysctl -p** to make it take effect. + + ``` + vim /etc/sysctl.conf + ``` + + + +- If the check result for file system configuration status \(**A7**\) is **Abnormal**, run the following command to troubleshoot this issue: + + ``` + gs_checkos -i B2 + ``` + +- If the check result for disk configuration status \(**A8**\) is **Abnormal**, set the disk mounting format to **rw,noatime,inode64,allocsize=16m**. + + Run the **man mount** command to mount the XFS parameter: + + ``` + rw,noatime,inode64,allocsize=16m + ``` + + You can also set the XFS parameter in the **/etc/fstab** file. For example: + + ``` + /dev/data /data xfs rw,noatime,inode64,allocsize=16m 0 0 + ``` + +- If the check result for pre-read block size status \(**A9**\) is **Abnormal**, run the following command to troubleshoot this issue: + + ``` + gs_checkos -i B3 + ``` + +- If the check result for I/O scheduling status \(**A10**\) is **Abnormal**, run the following command to troubleshoot this issue: + + ``` + gs_checkos -i B4 + ``` + +- If the check result for NIC configuration status \(**A11**\) is **Warning**, run the following command to troubleshoot this issue: + + ``` + gs_checkos -i B5 + ``` + +- If the check result for time consistency status \(**A12**\) is **Abnormal**, verify that the NTP service has been installed and started and has synchronized time from the NTP clock. +- If the check result for firewall status \(**A13**\) is **Abnormal**, disable the firewall. 
Run the following commands: + - SUSE: + + ``` + SuSEfirewall2 stop + ``` + + - RedHat7: + + ``` + systemctl disable firewalld + ``` + + - RedHat6: + + ``` + service iptables stop + ``` + + +- If the check result for THP service status \(**A14**\) is **Abnormal**, run the following command to troubleshoot this issue: + + ``` + gs_checkos -i B6 + ``` + + diff --git a/content/en/docs/Administratorguide/generating-configuration-files.md b/content/en/docs/Administratorguide/generating-configuration-files.md new file mode 100644 index 000000000..7e275ff68 --- /dev/null +++ b/content/en/docs/Administratorguide/generating-configuration-files.md @@ -0,0 +1,61 @@ +# Generating Configuration Files + +## Background + +If a static configuration file is damaged while you use openGauss, openGauss cannot obtain information about the openGauss topology structure and primary/standby relationship, affecting the openGauss function. In this case, you can use the **gs\_om** tool to generate a new static configuration file to replace the damaged file, ensuring normal openGauss running. + +## Prerequisites + +None + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to generate configuration files in a specified directory on the current host: + + ``` + gs_om -t generateconf -X /opt/software/openGauss/clusterconfig.xml --distribute + ``` + + **/opt/software/openGauss/clusterconfig.xml** is the directory for saving XML configuration files during the openGauss installation. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >1. After the command is executed, the new configuration file storage directory is displayed in the log information. Take a one-primary dual-standby environment as an example. This directory contains three configuration files named by host names. You need to replace the configuration files of corresponding hosts with the three files respectively. + >2. If **--distribute** is not specified, perform [3](#en-us_topic_0237088792_en-us_topic_0059777801_lc1ce55d572e44beea3e47b1b427fae3e) to distribute static configuration files to their corresponding hosts. If **--distribute** is specified, the static configuration files are automatically distributed and you do not need to perform [3](#en-us_topic_0237088792_en-us_topic_0059777801_lc1ce55d572e44beea3e47b1b427fae3e). + +3. \(Optional\) Replace the damaged static configuration files of the three hosts in the **/opt/gaussdb/app/bin** directory. + + Take one host as an example: + + ``` + mv /opt/huawei/wisequery/script/static_config_files/cluster_static_config_SIA1000056771 /opt/gaussdb/app/bin/cluster_static_config + ``` + + +## Examples + +Run the following commands on any of the hosts in openGauss to generate configuration files: + +``` +gs_om -t generateconf -X /opt/software/openGauss/clusterconfig.xml --distribute +Generating static configuration files for all nodes. +Creating temp directory to store static configuration files. +Successfully created the temp directory. +Generating static configuration files. +Successfully generated static configuration files. +Static configuration files for all nodes are saved in /opt/huawei/Bigdata/mppdb/wisequery/script/static_config_files. +Distributing static configuration files to all nodes. +Successfully distributed static configuration files. +``` + +Open the generated configuration file directory that contains three new files. 
+ +``` +cd /opt/huawei/Bigdata/mppdb/wisequery/script/static_config_files +ll +total 456 +-rwxr-xr-x 1 omm dbgrp 155648 2016-07-13 15:51 cluster_static_config_plat1 +-rwxr-xr-x 1 omm dbgrp 155648 2016-07-13 15:51 cluster_static_config_plat2 +-rwxr-xr-x 1 omm dbgrp 155648 2016-07-13 15:51 cluster_static_config_plat3 +``` + diff --git a/content/en/docs/Administratorguide/gs_basebackup.md b/content/en/docs/Administratorguide/gs_basebackup.md new file mode 100644 index 000000000..e9841be0e --- /dev/null +++ b/content/en/docs/Administratorguide/gs_basebackup.md @@ -0,0 +1,125 @@ +# gs\_basebackup + +## Background + +After openGauss is deployed, problems and exceptions may occur during database running. **gs\_basebackup**, provided by openGauss, is used to perform basic physical backup. **gs\_basebackup** copies the binary files of the database on the server using a replication protocol. To remotely execute **gs\_basebackup**, you need to use the system administrator account. **gs\_basebackup** supports only hot backup and does not support compressed backup. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- **gs\_basebackup** supports only full backup. +>- **gs\_basebackup** supports only hot backup and does not support compressed backup. +>- **gs\_basebackup** cannot back up tablespaces containing absolute paths on the same server. This is because the absolute path is unique on the same machine, and brings about conflicts. However, it can back up tablespaces containing absolute paths on different machines. +>- If the functions of incremental checkpoint and dual-write are enabled, **gs\_basebackup** also backs up dual-write files. +>- If the **pg\_xlog** directory is a soft link, no soft link is created during backup. Data is directly backed up to the **pg\_xlog** directory in the destination path. + +## Prerequisites + +- The openGauss database can be connected. Link replication is enabled in **pg\_hba.conf**, and at least one **max\_wal\_senders** is configured and available. +- During the restoration, backup files exist in the backup directory on all the nodes. If backup files are lost on any node, copy them to it from another node. + +## Syntax + +- Display help information. + + ``` + gs_basebackup -? | --help + ``` + +- Display version information. + + ``` + gs_basebackup -V | --version + ``` + + +## Parameter Description + +The **gs\_basebackup** tool can use the following types of parameters: + +- -D directory + + Directory for storing backup files. This parameter is mandatory. + + +- Common parameters + - -c, --checkpoint=fast|spread + + Sets the checkpoint mode to **fast** or **spread** \(default\). + + - -l, --label=LABEL + + Adds tags for the backup. + + - -P, --progress + + Enables the progress report. + + - -v, --verbose + + Enables the verbose mode. + + - -V, --version + + Prints the version and exits. + + - -?, --help + + Displays **gs\_basebackup** command parameters. + + +- Connection parameters + - -h, --host=HOSTNAME + + Specifies the host name of the machine on which the server is running or the directory for the Unix-domain socket. + + - -p, --port=PORT + + Specifies the port number of the database server. + + You can modify the default port number using this parameter. + + - -U, --username=USERNAME + + Specifies the user that connects to the database. + + - -s, --status-interval=INTERVAL + + Specifies the time for sending status packets to the server, in seconds. + + - -w,--no-password + + Never issues a password prompt. 
+ + - -W, --password + + Issues a password prompt when the **-U** parameter is used to connect to a local or remote database. + + + +## Example + +``` +gs_basebackup -D /home/test/trunk/install/data/backup -h 127.0.0.1 -p 21233 +INFO: The starting position of the xlog copy of the full build is: 0/1B800000. The slot minimum LSN is: 0/1B800000. +``` + +## Restoring Data from Backup Files + +If a database is faulty, restore it from backup files. **gs\_basebackup** backs up the database in binary mode. Therefore, you can directly copy and replace the original files or start the database on the backup database. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- If the current database instance is running, a port conflict may occur when you start the database from the backup file. In this case, you need to modify the port parameter in the configuration file or specify a port when starting the database. +>- If the current backup file is a primary/standby database, you may need to modify the replication connections between the master and slave databases. That is, **replconninfo1** and **replconninfo2** in the **postgre.conf** file. + +To restore the original database, perform the following steps: + +1. Stop the database server. For details, see _Administrator Guide_. +2. Copy the original database and all tablespaces to another location for future use. +3. Delete all or part of the files from the original database. +4. Use the database system user rights to restore the required database files from the backup. +5. If a link file exists in the database, modify the link file so that it can be linked to the correct file. +6. Restart the database server and check the database content to ensure that the database is restored to the required status. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- Incremental restoration from backup files is not supported. +>- After the restoration, check that the link file in the database is linked to the correct file. + diff --git a/content/en/docs/Administratorguide/gs_dump.md b/content/en/docs/Administratorguide/gs_dump.md new file mode 100644 index 000000000..093b7d741 --- /dev/null +++ b/content/en/docs/Administratorguide/gs_dump.md @@ -0,0 +1,561 @@ +# gs\_dump + +## Background + +gs\_dump, provided by openGauss, is used to export database information. You can export a database or its objects \(such as schemas, tables, and views\). The database can be the default postgres database or a user-specified database. + +**gs\_dump** is executed by OS user **omm**. + +When **gs\_dump** is used to export data, other users can still access \(read and write\) openGauss databases. + +**gs\_dump** can export complete, consistent data. For example, if **gs\_dump** is started to export database A at T1, data of the database at that time point will be exported, and modifications on the database after that time point will not be exported. + +**gs\_dump** can export database information to a plain-text SQL script file or archive file. + +- Plain-text SQL script: It contains the SQL statements required to restore the database. You can use [**gsql**](en-us_topic_0249632261.md) to execute the SQL script. With only a little modification, the SQL script can rebuild a database on other hosts or database products. +- Archive file: It contains data required to restore the database. It can be a tar-, directory-, or custom-format archive. For details, see [Table 1](#en-us_topic_0249632271_en-us_topic_0237152335_en-us_topic_0058967678_t17db29a12e7342cfbf02b2f6e50ff1a5). 
The export result must be used with [**gs\_restore**](gs_restore.md#EN-US_TOPIC_0250273519) to restore the database. The system allows users to select or even to sort the content to be imported. + +## Functions + +**gs\_dump** can create export files in four formats, which are specified by **-F** or **--format=**, as listed in [Table 1](#en-us_topic_0249632271_en-us_topic_0237152335_en-us_topic_0058967678_t17db29a12e7342cfbf02b2f6e50ff1a5). + +**Table 1** Formats of exported files + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Format | Value of -F | Description | Suggestion | Corresponding Import Tool |
+| --- | --- | --- | --- | --- |
+| Plain-text | p | A plain-text script file containing SQL statements and commands. The commands can be executed on gsql, a command line terminal, to recreate database objects and load table data. | You are advised to use plain-text exported files for small databases. | Before using gsql to restore database objects, you can use a text editor to edit the plain-text export file as required. |
+| Custom | c | A binary file that allows the restoration of all or selected database objects from an exported file. | You are advised to use custom-format archive files for a medium or large database. | You can use gs_restore to import database objects from a custom-format archive. |
+| Directory | d | A directory containing directory files and the data files of tables and BLOB objects. | - | - |
+| .tar | t | A tar-format archive that allows the restoration of all or selected database objects from an exported file. It cannot be further compressed and has an 8-GB limitation on the size of a single table. | - | - |
+
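+For instance, the output format is selected purely through -F; the same dump can be written as a .tar archive or a custom-format file by changing that one option (a sketch that mirrors the examples later in this section; the port and paths are placeholders):
+
+```
+gs_dump -p port_number postgres -f backup/MPPDB_backup.tar -F t
+gs_dump -p port_number postgres -f backup/MPPDB_backup.dmp -F c
+```
+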
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>To reduce the size of an exported file, you can use **gs\_dump** to compress it to a plain-text file or custom-format file. By default, a plain-text file is not compressed when generated. When a custom-format archive is generated, a medium level of compression is applied by default. Archived exported files cannot be compressed using **gs\_dump**. When a plain-text file is exported in compressed mode, **gsql** fails to import data objects. + +## Precautions + +Do not modify an exported file or its content. Otherwise, restoration may fail. + +To ensure the data consistency and integrity, **gs\_dump** acquires a share lock on a table to be dumped. If another transaction has acquired a share lock on the table, **gs\_dump** waits until this lock is released and then locks the table for dumping. If the table cannot be locked within the specified time, the dump fails. You can customize the timeout duration to wait for lock release by specifying the **--lock-wait-timeout** parameter. + +## Syntax + +``` +gs_dump [OPTION]... [DBNAME] +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>_DBNAME_ does not follow a short or long option. It specifies the database to be connected. +>For example: +>Specify _DBNAME_ without a **-d** option preceding it. +>``` +>gs_dump -p port_number postgres -f dump1.sql +>``` +>or +>``` +>export PGDATABASE=postgres +>``` +>``` +> gs_dump -p port_number -f dump1.sql +>``` +>Environment variable: _PGDATABASE_ + +## Parameter Description + +Common parameters + +- -f, --file=FILENAME + + Sends the output to the specified file or directory. If this parameter is omitted, the standard output is generated. If the output format is **\(-F c/-F d/-F t\)**, the **-f** parameter must be specified. If the value of the **-f** parameter contains a directory, the current user must have the read and write permissions on the directory, and the directory cannot be an existing one. + +- -F, --format=c|d|t|p + + Selects the exported file format. The format can be: + + - **p|plain**: Generates a text SQL script file. This is the default value. + - **c|custom**: Outputs a custom-format archive as a directory to be used as the input of **gs\_restore**. This is the most flexible output format in which users can manually select it and reorder the archived items during restoration. An archive in this format is compressed by default. + - **d|directory**: Creates a directory containing directory files and the data files of tables and BLOBs. + - **t|tar**: Outputs a .tar archive as the input of **gs\_restore**. The .tar format is compatible with the directory format. Extracting a .tar archive generates a valid directory-format archive. However, the .tar archive cannot be further compressed and has an 8-GB limitation on the size of a single table. The order of table data items cannot be changed during restoration. + + A .tar archive can be used as input of **gsql**. + + +- -v, --verbose + + Specifies the verbose mode. If it is specified, **gs\_dump** writes detailed object comments and the number of startups/stops to the dump file, and progress messages to standard error. + +- -V, --version + + Prints the **gs\_dump** version and exits. + +- -Z, --compress=0-9 + + Specifies the used compression level. + + Value range: 0-9 + + - **0** indicates no compression. + - **1** indicates that the compression ratio is the lowest and processing speed the fastest. 
+ - **9** indicates that the compression ratio is the highest and processing speed the slowest. + + For the custom-format archive, this option specifies the compression level of a single table data segment. By default, data is compressed at a medium level. The plain-text and .tar archive formats do not support compression currently. + +- --lock-wait-timeout=TIMEOUT + + Do not keep waiting to obtain shared table locks since the beginning of the dump. Consider it as failed if you are unable to lock a table within the specified time. The timeout period can be specified in any of the formats accepted by **SET statement\_timeout**. + +- -?, --help + + Displays help about **gs\_dump** parameters and exits. + + +Dump parameters: + +- -a, --data-only + + Generates only the data, not the schema \(data definition\). Dump the table data, big objects, and sequence values. + +- -b, --blobs + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- -c, --clean + + Before writing the command of creating database objects into the backup file, writes the command of clearing \(deleting\) database objects to the backup files. \(If no objects exist in the target database, **gs\_restore** probably displays some error information.\) + + This parameter is used only for the plain-text format. For the archive format, you can specify the option when using **gs\_restore**. + +- -C, --create + + The backup file content starts with the commands of creating the database and connecting to the created database. \(If the command script is executed in this mode, you can specify any database to run the command for creating a database. The data is restored to the created database instead of the specified database.\) + + This parameter is used only for the plain-text format. For the archive format, you can specify the option when using **gs\_restore**. + +- -E, --encoding=ENCODING + + Creates a dump file in the specified character set encoding. By default, the dump file is created in the database encoding. \(Alternatively, you can set the environment variable **PGCLIENTENCODING** to the required dump encoding.\) + +- -n, --schema=SCHEMA + + Dumps only schemas matching the schema names. This option contains the schema and all its contained objects. If this option is not specified, all non-system schemas in the target database will be dumped. Multiple schemas can be selected by specifying multiple **-n** options. The schema parameter is interpreted as a pattern according to the same rules used by the **\\d** command of **gsql**. Therefore, multiple schemas can also be selected by writing wildcard characters in the pattern. When you use wildcard characters, quote the pattern to prevent the shell from expanding the wildcard characters. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- If **-n** is specified, **gs\_dump** does not dump any other database objects which the selected schemas might depend upon. Therefore, there is no guarantee that the results of a specific-schema dump can be automatically restored to an empty database. + >- If **-n** is specified, the non-schema objects are not dumped. + + Multiple schemas can be dumped. Entering **-n **_schemaname_ multiple times dumps multiple schemas. + + For example: + + ``` + gs_dump -h host_name -p port_number postgres -f backup/bkp_shl2.sql -n sch1 -n sch2 + ``` + + In the preceding example, **sch1** and **sch2** are dumped. + +- -N, --exclude-schema=SCHEMA + + Does not dump any schemas matching the schemas pattern. 
The pattern is interpreted according to the same rules as for **-n**. **-N** can be specified multiple times to exclude schemas matching any of the specified patterns. + + When both **-n** and **-N** are specified, the schemas that match at least one **-n** option but no **-N** is dumped. If **-N** is specified and **-n** is not, the schemas matching **-N** are excluded from what is normally dumped. + + Dump allows you to exclude multiple schemas during dumping. + + Specify **-N exclude schema name** to exclude multiple schemas during dumping. + + For example: + + ``` + gs_dump -h host_name -p port_number postgres -f backup/bkp_shl2.sql -N sch1 -N sch2 + ``` + + In the preceding example, **sch1** and **sch2** will be excluded during the dumping. + +- -o, --oids + + Dumps object identifiers \(OIDs\) as parts of the data in each table. Use this option if your application references the OID columns in some way. If the preceding situation does not occur, do not use this parameter. + +- -O, --no-owner + + Do not output commands to set ownership of objects to match the original database. By default, **gs\_dump** issues the **ALTER OWNER** or **SET SESSION AUTHORIZATION** statement to set ownership of created database objects. These statements will fail when the script is running unless it is started by a system administrator \(or the same user that owns all of the objects in the script\). To make a script that can be stored by any user and give the user ownership of all objects, specify **-O**. + + This parameter is used only for the plain-text format. For the archive format, you can specify the option when using **gs\_restore**. + +- -s, --schema-only + + Dumps only the object definition \(schema\) but not data. + +- -S, --sysadmin=NAME + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- -t, --table=TABLE + + Specifies a list of tables, views, sequences, or foreign tables to be dumped. You can use multiple **-t** parameters or wildcard characters to specify tables. + + When you use wildcard characters, quote patterns to prevent the shell from expanding the wildcard characters. + + The **-n** and **-N** options have no effect when **-t** is used, because tables selected by using **-t** will be dumped regardless of those options. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The number of **-t** parameters must be less than or equal to 100. + >- If the number of **-t** parameters is greater than 100, you are advised to use the **--include-table-file** parameter to replace some **-t** parameters. + >- If **-t** is specified, **gs\_dump** does not dump any other database objects which the selected tables might depend upon. Therefore, there is no guarantee that the results of a specific-table dump can be automatically restored to an empty database. + >- **-t tablename** only dumps visible tables in the default search path. **-t '\*.tablename'** dumps _tablename_ tables in all the schemas of the dumped database. **-t schema.table** dumps tables in a specific schema. + >- **-t tablename** does not export trigger information from a table. + + For example: + + ``` + gs_dump -h host_name -p port_number postgres -f backup/bkp_shl2.sql -t schema1.table1 -t schema2.table2 + ``` + + In the preceding example, **schema1.table1** and **schema2.table2** are dumped. + +- --include-table-file=FILENAME + + Specifies the table file to be dumped. + +- -T, --exclude-table=TABLE + + Specifies a list of tables, views, sequences, or foreign tables not to be dumped. 
You can use multiple **-T** parameters or wildcard characters to specify tables. + + When **-t** and **-T** are input, the object will be stored in **-t** list not **-T** table object. + + For example: + + ``` + gs_dump -h host_name -p port_number postgres -f backup/bkp_shl2.sql -T table1 -T table2 + ``` + + In the preceding example, **table1** and **table2** are excluded from the dumping. + +- --exclude-table-file=FILENAME + + Specifies the table files that do not need to be dumped. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >Same as **--include-table-file**, the content format of this parameter is as follows: + >schema1.table1 + >schema2.table2 + >...... + +- -x, --no-privileges|--no-acl + + Prevents the dumping of access permissions \(grant/revoke commands\). + +- --binary-upgrade + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --binary-upgrade-usermap="USER1=USER2" + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --column-inserts|--attribute-inserts + + Exports data by running the **INSERT** command with explicit column names **\{INSERT INTO table \(column, ...\) VALUES ...\}**. This will cause a slow restoration. However, since this option generates an independent command for each row, an error in reloading a row causes only the loss of the row rather than the entire table content. + +- --disable-dollar-quoting + + Disables the use of dollar sign \($\) for function bodies, and forces them to be quoted using the SQL standard string syntax. + +- --disable-triggers + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --exclude-table-data=TABLE + + Does not dump data that matches any of table patterns. The pattern is interpreted according to the same rules as for **-t**. + + **--exclude-table-data** can be entered more than once to exclude tables matching any of several patterns. When you need the specified table definition rather than data in the table, this option is helpful. + + To exclude data of all tables in the database, see [--schema-only](#en-us_topic_0249632271_en-us_topic_0237152335_en-us_topic_0059777770_l35ed3d5a093e42ab8fc945dd3ca80ecd). + +- --inserts + + Dumps data by the **INSERT** statement \(rather than **COPY**\). This will cause a slow restoration. + + However, since this option generates an independent command for each row, an error in reloading a row causes only the loss of the row rather than the entire table content. The restoration may fail if you rearrange the column order. The **--column-inserts** option is unaffected against column order changes, though even slower. + +- --no-security-labels + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --no-tablespaces + + Does not issue commands to select tablespaces. All the objects will be created during restoration, no matter which tablespace is selected when using this option. + + This parameter is used only for the plain-text format. For the archive format, you can specify the option when using **gs\_restore**. + +- --no-unlogged-table-data + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --non-lock-table + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --include-alter-table + + Dumps deleted columns of tables. This option records deleted columns. + +- --quote-all-identifiers + + Forcibly quotes all identifiers. 
This parameter is useful when you dump a database for migration to a later version, in which additional keywords may be introduced. + +- --section=SECTION + + Specifies dumped name sections \(pre-data, data, or post-data\). + +- --serializable-deferrable + + Uses a serializable transaction for the dump to ensure that the used snapshot is consistent with later database status. Perform this operation at a time point in the transaction flow, at which everything is normal. This ensures successful transaction and avoids serialization failures of other transactions, which requires serialization again. + + This option has no benefits for disaster recovery. During the upgrade of the original database, loading a database copy as a report or loading other shared read-only dump is helpful. If the option does not exist, dump reveals a status which is different from the submitted sequence status of any transaction. + + This option will make no difference if there are no active read-write transactions when **gs\_dump** is started. If the read-write transactions are in active status, the dump start time will be delayed for an uncertain period. + +- --use-set-session-authorization + + Specifies that the standard SQL **SET SESSION AUTHORIZATION** command rather than **ALTER OWNER** is returned to ensure the object ownership. This makes dumping more standard. However, if a dump file contains objects that have historical problems, restoration may fail. A dump using **SET SESSION AUTHORIZATION** requires the system administrator permissions, whereas **ALTER OWNER** requires lower permissions. + +- --with-encryption=AES128 + + Specifies that dumping data needs to be encrypted using AES128. + +- --with-key=KEY + + Specifies that the key length of AES128 must be 16 bytes. + +- --include-depend-objs + + Includes information about the objects that depend on the specified object in the backup result. This parameter takes effect only if the **-t** or **--include-table-file** parameter is specified. + +- --exclude-self + + Excludes information about the specified object from the backup result. This parameter takes effect only if the **-t** or **--include-table-file** parameter is specified. + +- --dont-overwrite-file + + The existing files in plain-text, .tar, and custom formats will be overwritten. This option is not used for the directory format. + + For example: + + Assume that the **backup.sql** file exists in the current directory. If you specify **-f backup.sql** in the input command, and the **backup.sql** file is generated in the current directory, the original file will be overwritten. + + If the backup file already exists and **--dont-overwrite-file** is specified, an error will be reported with the message that the dump file exists. + + ``` + gs_dump -p port_number postgres -f backup.sql -F plain --dont-overwrite-file + ``` + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- The **-s/--schema-only** and **-a/--data-only** parameters do not coexist. +>- The **-c/--clean** and **-a/--data-only** parameters do not coexist. +>- **--inserts/--column-inserts** and **-o/--oids** do not coexist, because **OIDS** cannot be set using the **INSERT** statement. +>- **--role** must be used in conjunction with **--rolepassword**. +>- **--binary-upgrade-usermap** must be used in conjunction with **--binary-upgrade**. +>- **--include-depend-objs** or **--exclude-self** takes effect only when **-t** or **--include-table-file** is specified. 
+>- **--exclude-self** must be used in conjunction with **--include-depend-objs**. + +Connection parameters: + +- -h, --host=HOSTNAME + + Specifies the host name. If the value begins with a slash \(/\), it is used as the directory for the UNIX domain socket. The default value is taken from the **PGHOST** environment variable \(if available\). Otherwise, a Unix domain socket connection is attempted. + + This parameter is used only for defining names of the hosts outside openGauss. The names of the hosts inside openGauss must be 127.0.0.1. + + Example: _host name_ + + Environment variable: **PGHOST** + +- -p, --port=PORT + + Specifies the host port number. If the thread pool function is enabled, you are advised to use **pooler port**, that is, the host port number plus 1. + + Environment variable: **PGPORT** + +- -U, --username=NAME + + Specifies the username of the host to be connected. + + If the username of the host to be connected is not specified, the system administrator is used by default. + + Environment variable: **PGUSER** + +- -w, --no-password + + Never issues a password prompt. The connection attempt fails if the host requires password verification and the password is not provided in other ways. This parameter is useful in batch jobs and scripts in which no user password is required. + +- -W, --password=PASSWORD + + Specifies the user password for connection. If the host uses the trust authentication policy, the administrator does not need to enter the **-W** option. If the **-W** option is not provided and you are not a system administrator, the Dump Restore tool will ask you to enter a password. + +- --role=ROLENAME + + Specifies a role name to be used for creating the dump. If this option is selected, the **SET ROLE** command will be issued after the database is connected to **gs\_dump**. It is useful when the authenticated user \(specified by **-U**\) lacks the permissions required by **gs\_dump**. It allows the user to switch to a role with the required permissions. Some installations have a policy against logging in directly as a super administrator. This option allows dumping data without violating the policy. + +- --rolepassword=ROLEPASSWORD + + Specifies the password for a role. + + +## Notice + +If any local additions need to be added to the template1 database in openGauss, restore the output of **gs\_dump** into an empty database with caution. Otherwise, you are likely to obtain errors due to duplicate definitions of the added objects. To create an empty database without any local additions, copy data from template0 rather than template1. Example: + +``` +CREATE DATABASE foo WITH TEMPLATE template0; +``` + +The .tar file size must be smaller than 8 GB. \(This is the .tar file format limitations.\) The total size of a .tar archive and any of the other output formats are not limited, except possibly by the OS. + +The dump file generated by **gs\_dump** does not contain the statistics used by the optimizer to make execution plans. Therefore, you are advised to run **ANALYZE** after restoring from a dump file to ensure optimal performance. The dump file does not contain any **ALTER DATABASE ... SET** commands. These settings are dumped by **gs\_dumpall**, along with database users and other installation settings. + +## Examples + +Use **gs\_dump** to dump a database as a SQL text file or a file in other formats. + +In the following examples, **Bigdata@123** indicates the password for the database user. 
**backup/MPPDB\_backup.sql** indicates an exported file where **backup** indicates the relative path of the current directory. **37300** indicates the port number of the database server. **postgres** indicates the name of the database to be accessed. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>Before exporting files, ensure that the directory exists and you have the read and write permissions on the directory. + +Example 1: Use **gs\_dump** to export the full information of the postgres database. The exported **MPPDB\_backup.sql** file is in plain-text format. + +``` +gs_dump -U omm -W Bigdata@123 -f backup/MPPDB_backup.sql -p 37300 postgres -F p +gs_dump[port='37300'][postgres][2018-06-27 09:49:17]: The total objects number is 356. +gs_dump[port='37300'][postgres][2018-06-27 09:49:17]: [100.00%] 356 objects have been dumped. +gs_dump[port='37300'][postgres][2018-06-27 09:49:17]: dump database postgres successfully +gs_dump[port='37300'][postgres][2018-06-27 09:49:17]: total time: 1274 ms +``` + +Use **gsql** to import data from the exported plain-text file. + +Example 2: Use **gs\_dump** to export the full information of the postgres database. The exported **MPPDB\_backup.tar** file is in .tar format. + +``` +gs_dump -U omm -W Bigdata@123 -f backup/MPPDB_backup.tar -p 37300 postgres -F t +gs_dump[port='37300'][postgres][2018-06-27 10:02:24]: The total objects number is 1369. +gs_dump[port='37300'][postgres][2018-06-27 10:02:53]: [100.00%] 1369 objects have been dumped. +gs_dump[port='37300'][postgres][2018-06-27 10:02:53]: dump database postgres successfully +gs_dump[port='37300'][postgres][2018-06-27 10:02:53]: total time: 50086 ms +``` + +Example 3: Use **gs\_dump** to export the full information of the postgres database. The exported **MPPDB\_backup.dmp** file is in custom format. + +``` +gs_dump -U omm -W Bigdata@123 -f backup/MPPDB_backup.dmp -p 37300 postgres -F c +gs_dump[port='37300'][postgres][2018-06-27 10:05:40]: The total objects number is 1369. +gs_dump[port='37300'][postgres][2018-06-27 10:06:03]: [100.00%] 1369 objects have been dumped. +gs_dump[port='37300'][postgres][2018-06-27 10:06:03]: dump database postgres successfully +gs_dump[port='37300'][postgres][2018-06-27 10:06:03]: total time: 36620 ms +``` + +Example 4: Use **gs\_dump** to export the full information of the postgres database. The exported **MPPDB\_backup** file is in directory format. + +``` +gs_dump -U omm -W Bigdata@123 -f backup/MPPDB_backup -p 37300 postgres -F d +gs_dump[port='37300'][postgres][2018-06-27 10:16:04]: The total objects number is 1369. +gs_dump[port='37300'][postgres][2018-06-27 10:16:23]: [100.00%] 1369 objects have been dumped. +gs_dump[port='37300'][postgres][2018-06-27 10:16:23]: dump database postgres successfully +gs_dump[port='37300'][postgres][2018-06-27 10:16:23]: total time: 33977 ms +``` + +Example 5: Use **gs\_dump** to export the information of the postgres database, excluding the information of the table specified in the **/home/MPPDB\_temp.sql** file. The exported **MPPDB\_backup.sql** file is in plain-text format. + +``` +gs_dump -U omm -W Bigdata@123 -p 37300 postgres --exclude-table-file=/home/MPPDB_temp.sql -f backup/MPPDB_backup.sql +gs_dump[port='37300'][postgres][2018-06-27 10:37:01]: The total objects number is 1367. +gs_dump[port='37300'][postgres][2018-06-27 10:37:22]: [100.00%] 1367 objects have been dumped. 
+gs_dump[port='37300'][postgres][2018-06-27 10:37:22]: dump database postgres successfully +gs_dump[port='37300'][postgres][2018-06-27 10:37:22]: total time: 37017 ms +``` + +Example 6: Use **gs\_dump** to export only the information about the views that depend on the **testtable** table. Create another **testtable** table, and then restore the views that depend on it. + +- Back up only the views that depend on the **testtable** table. + + ``` + gs_dump -s -p 37300 postgres -t PUBLIC.testtable --include-depend-objs --exclude-self -f backup/MPPDB_backup.sql -F p + gs_dump[port='37300'][postgres][2018-06-15 14:12:54]: The total objects number is 331. + gs_dump[port='37300'][postgres][2018-06-15 14:12:54]: [100.00%] 331 objects have been dumped. + gs_dump[port='37300'][postgres][2018-06-15 14:12:54]: dump database postgres successfully + gs_dump[port='37300'][postgres][2018-06-15 14:12:54]: total time: 327 ms + ``` + +- Change the name of the **testtable** table. + + ``` + gsql -p 37300 postgres -r -c "ALTER TABLE PUBLIC.testtable RENAME TO testtable_bak;" + ``` + +- Create another **testtable** table. + + ``` + CREATE TABLE PUBLIC.testtable(a int, b int, c int); + ``` + +- Restore the views for the new **testtable** table. + + ``` + gsql -p 37300 postgres -r -f backup/MPPDB_backup.sql + ``` + + +## Helpful Links + +[gs\_dumpall](gs_dumpall.md#EN-US_TOPIC_0250273518) and [gs\_restore](gs_restore.md#EN-US_TOPIC_0250273519) + diff --git a/content/en/docs/Administratorguide/gs_dumpall.md b/content/en/docs/Administratorguide/gs_dumpall.md new file mode 100644 index 000000000..08aa6b1c8 --- /dev/null +++ b/content/en/docs/Administratorguide/gs_dumpall.md @@ -0,0 +1,253 @@ +# gs\_dumpall + +## Background + +**gs\_dumpall**, provided by openGauss, is used to export all openGauss database information, including data of the default database postgres, user-defined databases, and common global objects of all openGauss databases. + +**gs\_dumpall** is executed by OS user **omm**. + +When **gs\_dumpall** is used to export data, other users can still access \(read and write\) openGauss databases. + +**gs\_dumpall** can export complete, consistent data. For example, if **gs\_dumpall** is started to export openGauss database at T1, data of the database at that time point will be exported, and modifications on the database after that time point will not be exported. + +**gs\_dumpall** exports all openGauss databases in two parts: + +- **gs\_dumpall** exports all global objects, including information about database users and groups, tablespaces, and attributes \(for example, global access permissions\). +- **gs\_dumpall** invokes **gs\_dump** to export SQL scripts from each openGauss database, which contain all the SQL statements required to restore databases. + +The exported files are both plain-text SQL scripts. Use [gsql](en-us_topic_0085031848.md) to execute them to restore openGauss databases. + +## Precautions + +- Do not modify an exported file or its content. Otherwise, restoration may fail. +- To ensure the data consistency and integrity, **gs\_dumpall** acquires a share lock on a table to be dumped. If another transaction has acquired a share lock on the table, **gs\_dumpall** waits until this lock is released and then locks the table for dumping. If the table cannot be locked within the specified time, the dump fails. You can customize the timeout duration to wait for lock release by specifying the **--lock-wait-timeout** parameter. 
+- During an export, **gs\_dumpall** reads all tables in a database. Therefore, you need to connect to the database as an openGauss administrator to export a complete file. When you use **gsql** to execute SQL scripts, cluster administrator permissions are also required to add users and user groups, and create databases. + +## Syntax + +``` +gs_dumpall [OPTION]... +``` + +## Parameter Description + +Common parameters: + +- -f, --filename=FILENAME + + Sends the output to the specified file. If this parameter is omitted, the standard output is generated. + +- -v, --verbose + + Specifies the verbose mode. If it is specified, **gs\_dumpall** writes detailed object comments and number of startups/stops to the dump file, and progress messages to standard error. + +- -V, --version + + Prints the _gs\_dumpall_ version and exits. + +- --lock-wait-timeout=TIMEOUT + + Do not keep waiting to obtain shared table locks at the beginning of the dump. Consider it as failed if you are unable to lock a table within the specified time. The timeout period can be specified in any of the formats accepted by **SET statement\_timeout**. + +- -?, --help + + Displays help about **gs\_dumpall** parameters and exits. + + +Dump parameters: + +- -a, --data-only + + Dumps only the data, not the schema \(data definition\). + +- -c, --clean + + Runs SQL statements to delete databases before rebuilding them. Statements for dumping roles and tablespaces are added. + +- -g, --globals-only + + Dumps only global objects \(roles and tablespaces\) but no databases. + +- -o, --oids + + Dumps object identifiers \(OIDs\) as parts of the data in each table. Use this parameter if your application references the OID columns in some way. If the preceding situation does not occur, do not use this parameter. + +- -O, --no-owner + + Do not output commands to set ownership of objects to match the original database. By default, **gs\_dumpall** issues the **ALTER OWNER** or **SET SESSION AUTHORIZATION** command to set ownership of created schema objects. These statements will fail when the script is running unless it is started by a system administrator \(or the same user that owns all of the objects in the script\). To make a script that can be stored by any user and give the user ownership of all objects, specify **-O**. + +- -r, --roles-only + + Dumps only roles but not databases or tablespaces. + +- -s, --schema-only + + Dumps only the object definition \(schema\) but not data. + +- -S, --sysadmin=NAME + + Name of the system administrator during the dump. + +- -t, --tablespaces-only + + Dumps only tablespaces but not databases or roles. + +- -x, --no-privileges + + Prevents the dumping of access permissions \(grant/revoke commands\). + +- --column-inserts|--attribute-inserts + + Exports data by running the **INSERT** command with explicit column names **\{INSERT INTO table \(column, ...\) VALUES ...\}**. This will cause a slow restoration. However, since this option generates an independent command for each row, an error in reloading a row causes only the loss of the row rather than the entire table content. + +- --disable-dollar-quoting + + Disables the use of dollar sign \($\) for function bodies, and forces them to be quoted using the SQL standard string syntax. + +- --disable-triggers + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --inserts + + Dumps data by the **INSERT** statement \(rather than **COPY**\). This will cause a slow restoration. 
The restoration may fail if you rearrange the column order. The **--column-inserts** option is unaffected against column order changes, though even slower. + +- --no-security-labels + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --no-tablespaces + + Does not generate output statements to create tablespaces or select tablespaces for objects. All the objects will be created during the restoration process, no matter which tablespace is selected when using this option. + +- --no-unlogged-table-data + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --quote-all-identifiers + + Forcibly quotes all identifiers. This parameter is useful when you dump a database for migration to a later version, in which additional keywords may be introduced. + +- --dont-overwrite-file + + Does not overwrite the current file. + +- --use-set-session-authorization + + Specifies that the standard SQL **SET SESSION AUTHORIZATION** command rather than **ALTER OWNER** is returned to ensure the object ownership. This makes dumping more standard. However, if a dump file contains objects that have historical problems, restoration may fail. A dump using **SET SESSION AUTHORIZATION** requires the system administrator rights, whereas **ALTER OWNER** requires lower permissions. + +- --with-encryption=AES128 + + Specifies that dumping data needs to be encrypted using AES128. + +- --with-key=KEY + + Specifies that the key length of AES128 must be 16 bytes. + +- --include-templatedb + + Includes template databases during the dump. + +- --binary-upgrade + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --binary-upgrade-usermap="USER1=USER2" + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --tablespaces-postfix + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --parallel-jobs + + Specifies the number of concurrent backup processes. The value range is 1-1000. + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- The **-g/--globals-only** and **-r/--roles-only** parameters do not coexist. +>- The **-g/--globals-only** and **-t/--tablespaces-only** parameters do not coexist. +>- The **-r/--roles-only** and **-t/--tablespaces-only** parameters do not coexist. +>- The **-s/--schema-only** and **-a/--data-only** parameters do not coexist. +>- The **-r/--roles-only** and **-a/--data-only** parameters do not coexist. +>- The **-t/--tablespaces-only** and **-a/--data-only** parameters do not coexist. +>- The **-g/--globals-only** and **-a/--data-only** parameters do not coexist. +>- **--tablespaces-postfix** must be used in conjunction with **--binary-upgrade**. +>- **--binary-upgrade-usermap** must be used in conjunction with **--binary-upgrade**. +>- **--parallel-jobs** must be used in conjunction with **-f/--file**. + +Connection parameters: + +- -h, --host + + Specifies the host name. If the value begins with a slash \(/\), it is used as the directory for the UNIX domain socket. The default value is taken from the PGHOST environment \(if variable\). Otherwise, a Unix domain socket connection is attempted. + + This parameter is used only for defining names of the hosts outside openGauss. The names of the hosts inside openGauss must be 127.0.0.1. + + Environment Variable: _PGHOST_ + +- -l, --database + + Specifies the name of the database connected to dump all objects and discover other databases to be dumped. 
If this parameter is not specified, the **postgres** database will be used. If the **postgres** database does not exist, **template1** will be used. + +- -p, --port + + TCP port or the local Unix-domain socket file extension on which the server is listening for connections. The default value is the _PGPORT_ environment variable. + + If the thread pool function is enabled, you are advised to use **pooler port**, that is, the listening port number plus 1. + + Environment variable: _PGPORT_ + +- -U, --username + + Specifies the user name to connect to. + + Environment variable: _PGUSER_ + +- -w, --no-password + + Never issues a password prompt. The connection attempt fails if the host requires password verification and the password is not provided in other ways. This parameter is useful in batch jobs and scripts in which no user password is required. + +- -W, --password + + Specifies the user password for connection. If the host uses the trust authentication policy, the administrator does not need to enter the **-W** option. If the **-W** option is not provided and you are not a system administrator, the Dump Restore tool will ask you to enter a password. + +- --role + + Specifies a role name to be used for creating the dump. This option causes **gs\_dumpall** to issue the **SET ROLE** statement after connecting to the database. It is useful when the authenticated user \(specified by **-U**\) lacks the permissions required by the **gs\_dumpall**. It allows the user to switch to a role with the required permissions. Some installations have a policy against logging in directly as a system administrator. This option allows dumping data without violating the policy. + +- --rolepassword + + Specifies the password of the specific role. + + +## Notice + +**gs\_dumpall** internally invokes **gs\_dump**. For details about the diagnosis information, see [gs\_dump](gs_dump.md#EN-US_TOPIC_0250273517). + +Once **gs\_dumpall** is restored, run ANALYZE on each database so that the optimizer can provide useful statistics. + +**gs\_dumpall** requires all needed tablespace directories to exit before the restoration. Otherwise, database creation will fail if the databases are in non-default locations. + +## Examples + +Use **gs\_dumpall** to export all openGauss databases at a time. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>**gs\_dumpall** supports only plain-text format export. Therefore, only **gsql** can be used to restore a file exported using **gs\_dumpall**. + +``` +gs_dumpall -f backup/bkp2.sql -p 37300 +gs_dump[port='37300'][dbname='postgres'][2018-06-27 09:55:09]: The total objects number is 2371. +gs_dump[port='37300'][dbname='postgres'][2018-06-27 09:55:35]: [100.00%] 2371 objects have been dumped. 
+gs_dump[port='37300'][dbname='postgres'][2018-06-27 09:55:46]: dump database dbname='postgres' successfully +gs_dump[port='37300'][dbname='postgres'][2018-06-27 09:55:46]: total time: 55567 ms +gs_dumpall[port='37300'][2018-06-27 09:55:46]: dumpall operation successful +gs_dumpall[port='37300'][2018-06-27 09:55:46]: total time: 56088 ms +``` + +## Helpful Links + +[gs\_dump](gs_dump.md#EN-US_TOPIC_0250273517), [gs\_restore](gs_restore.md#EN-US_TOPIC_0250273519) + diff --git a/content/en/docs/Administratorguide/gs_restore.md b/content/en/docs/Administratorguide/gs_restore.md new file mode 100644 index 000000000..16aff6327 --- /dev/null +++ b/content/en/docs/Administratorguide/gs_restore.md @@ -0,0 +1,387 @@ +# gs\_restore + +## Background + +**gs\_restore**, provided by openGauss, is used to import data that was exported using **gs\_dump**. It can also be used to import files exported by **gs\_dump**. + +**gs\_restore** is executed by OS user **omm**. + +It has the following functions: + +- Importing data to the database + + If a database is specified, data is imported to the database. For parallel import, the password for connecting to the database is required. + +- Importing data to the script file + + If the database storing imported data is not specified, a script containing the SQL statement to recreate the database is created and written to a file or standard output. This script output is equivalent to the plain text output format of **gs\_dump**. + + +## Command Format + +``` +gs_restore [OPTION]... FILE +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- **FILE** does not have a short or long parameter. It is used to specify the location for the archive files. +>- The **dbname** or **-l** parameter is required as prerequisites. Users cannot enter **dbname** and **-l** parameters at the same time. +>- **gs\_restore** incrementally imports data by default. To prevent data exceptions caused by multiple import operations, you are advised to use the **-c** parameter during the import. Before recreating database objects, delete the database objects that already exist in the database to be restored. +>- There is no option to control log printing. To hide logs, redirect the logs to the log file. If a large amount of table data needs to be restored, the table data will be restored in batches. Therefore, the log indicating that the table data has been imported is generated for multiple times. + +## Parameter Description + +Common parameters + +- -d, --dbname=NAME + + Connects to the **dbname** database and imports data to the database. + +- -f, --file=FILENAME + + Specifies the output file for the generated script, or uses the output file in the list specified using **-l**. + + The default is the standard output. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**-f** cannot be used in conjunction with **-d**. + +- -F, --format=c|d|t + + Specifies the format of the archive. The format does not need to be specified because the _gs\_restore_ determines the format automatically. + + Value range: + + - **c/custom**: The archive form is the customized format in [gs\_dump](gs_dump.md#EN-US_TOPIC_0250273517). + - **d/directory**: The archive form is a directory archive format. + - **t/tar**: The archive form is a .tar archive format. + +- -l, --list + + Lists the forms of the archive. The operation output can be used for the input of the **-L** parameter. If filtering parameters, such as **-n** or **-t**, are used together with **-l**, they will restrict the listed items. 
+ +- -v, --verbose + + Specifies the verbose mode. + +- -V, --version + + Prints the **gs\_restore** version and exits. + +- -?, --help + + Displays help information about the parameters of **gs\_restore** and exits. + + +Parameters for importing data + +- -a, -data-only + + Imports only the data, not the schema \(data definition\). **gs\_restore** incrementally imports data. + +- -c, --clean + + Cleans \(deletes\) existing database objects in the database to be restored before recreating them. + +- -C, --create + + Creates the database before importing data to it. \(When this parameter is used, the database specified by **-d** is used to issue the initial **CREATE DATABASE** command. All data is imported to the created database.\) + +- -e, --exit-on-error + + Exits if an error occurs when you send the SQL statement to the database. If you do not exit, the commands will still be sent and error information will be displayed when the import ends. + +- -I, --index=NAME + + Imports only the definition of the specified index. Multiple indexes can be imported. Enter **-I**_ index_ multiple times to import multiple indexes. + + For example: + + ``` + gs_restore -h host_name -p port_number -d postgres -I Index1 -I Index2 backup/MPPDB_backup.tar + ``` + + In this example, _Index1_ and _Index2_ will be imported. + +- -j, --jobs=NUM + + Specifies the number of concurrent, the most time-consuming jobs of **gs\_restore** \(such as loading data, creating indexes, or creating constraints\). This parameter can greatly reduce the time to import a large database to a server running on a multiprocessor machine. + + Each job is one process or one thread, depending on the OS; and uses a separate connection to the server. + + The optimal value for this option depends on the server hardware setting, the client, the network, the number of CPU cores, and disk settings. It is recommended that the parameter be set to the number of CPU cores on the server. In addition, a larger value can also lead to faster import in many cases. However, an overly large value will lead to decreased performance because of thrashing. + + This parameter supports custom-format archives only. The input file must be a regular file \(not the pipe file\). This parameter can be ignored when you select the script method rather than connect to a database server. In addition, multiple jobs cannot be used in conjunction with the **--single-transaction** parameter. + +- -L, --use-list=FILENAME + + Imports only archive elements that are listed in **list-file** and imports them in the order that they appear in the file. If filtering parameters, such as **-n** or **-t**, are used in conjunction with **-L**, they will further limit the items to be imported. + + **list-file** is normally created by editing the output of a previous **-l** parameter. File lines can be moved or removed, and can also be commented out by placing a semicolon \(;\) at the beginning of the row. An example is provided in this document. + +- -n, --schema=NAME + + Restores only objects that are listed in schemas. + + This parameter can be used in conjunction with the **-t** parameter to import a specific table. + + Entering **-n **_schemaname_ multiple times can import multiple schemas. + + For example: + + ``` + gs_restore -h host_name -p port_number -d postgres -n sch1 -n sch2 backup/MPPDB_backup.tar + ``` + + In this example, **sch1** and **sch2** will be imported. + +- -O, --no-owner + + Do not output commands to set ownership of objects to match the original database. 
By default, **gs\_restore** issues the **ALTER OWNER** or **SET SESSION AUTHORIZATION** statement to set ownership of created schema elements. Unless the system administrator or the user who has all the objects in the script initially accesses the database. Otherwise, the statement will fail. Any user name can be used for the initial connection using **-O**, and this user will own all the created objects. + +- -P, --function=NAME\(args\) + + Imports only listed functions. You need to correctly spell the function name and the parameter based on the contents of the dump file in which the function exists. + + Entering **-P** alone means importing all function-name\(args\) functions in a file. Entering **-P** with **-n** means importing the function-name\(args\) functions in a specified schema. Entering **-P** multiple times and using **-n** once means that all imported functions are in the **-n** schema by default. + + You can enter **-n schema-name -P 'function-name\(args\)'** multiple times to import functions in specified schemas. + + For example: + + ``` + gs_restore -h host_name -p port_number -d postgres -n test1 -P 'Func1(integer)' -n test2 -P 'Func2(integer)' backup/MPPDB_backup.tar + ``` + + In this example, both **Func1 \(i integer\)** in the **test1** schema and **Func2 \(j integer\)** in the **test2** schema will be imported. + +- -s, --schema-only + + Imports only schemas \(data definitions\), instead of data \(table content\). The current sequence value will not be imported. + +- -S, --sysadmin=NAME + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- -t, --table=NAME + + Imports only listed table definitions or data, or both. This parameter can be used in conjunction with the **-n** parameter to specify a table object in a schema. When **-n** is not entered, the default schema is PUBLIC. Entering **-n **_schemaname_** -t **_tablename_ multiple times can import multiple tables in a specified schema. + + For example: + + Import **table1** in the **PUBLIC** schema. + + ``` + gs_restore -h host_name -p port_number -d postgres -t table1 backup/MPPDB_backup.tar + ``` + + Import **test1** in the **test1** schema and **test2** in the **test2** schema. + + ``` + gs_restore -h host_name -p port_number -d postgres -n test1 -t test1 -n test2 -t test2 backup/MPPDB_backup.tar + ``` + + Import **table1** in the **PUBLIC** schema and **test1** in the **test1** schema. + + ``` + gs_restore -h host_name -p port_number -d postgres -n PUBLIC -t table1 -n test1 -t table1 backup/MPPDB_backup.tar + ``` + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >**-t** does not support the **schema\_name.table\_name** input format. + +- -T, --trigger=NAME + + This parameter is reserved for extension. + +- -x, --no-privileges/--no-acl + + Prevents the import of access permissions \(**GRANT**/**REVOKE** commands\). + +- -1, --single-transaction + + Executes import as a single transaction \(that is, commands are wrapped in **BEGIN**/**COMMIT**\). + + This parameter ensures that either all the commands are completed successfully or no application is changed. This parameter means **--exit-on-error**. + +- --disable-triggers + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --no-data-for-failed-tables + + By default, table data will be imported even if the statement to create a table fails \(for example, the table already exists\). Data in such table is skipped using this parameter. 
This operation is useful if the target database already contains the desired table contents. + + This parameter takes effect only when you import data directly into a database, not when you output SQL scripts. + +- --no-security-labels + + Specifies a reserved port for function expansion. This parameter is not recommended. + +- --no-tablespaces + + Tablespaces excluding specified ones All objects will be created during the import process no matter which tablespace is selected when using this option. + +- --section=SECTION + + Imports the listed sections \(such as pre-data, data, or post-data\). + +- --use-set-session-authorization + + Is used for plain-text backup. + + Outputs the **SET SESSION AUTHORIZATION** statement instead of the **ALTER OWNER** statement to determine object ownership. This parameter makes dump more standards-compatible. If the records of objects in exported files are referenced, import may fail. Only administrators can use the **SET SESSION AUTHORIZATION** statement to dump data, and the administrators must manually change and verify the passwords of exported files by referencing the **SET SESSION AUTHORIZATION** statement before import. The **ALTER OWNER** statement requires lower permissions. + +- --with-key=KEY + + Specifies that the key length of AES128 must be 16 bytes. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If the dump is encrypted, enter the **--with-key=KEY** parameter in the **gs\_restore** command. If it is not entered, you will receive an error message. + >Enter the same key while entering the dump. + >When the dump format is **c** or **t**, the dumped content has been processed, and therefore the input is not restricted by the encryption. + + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- If any local additions need to be added to the template1 database during the installation, restore the output of **gs\_restore** into an empty database with caution. Otherwise, you are likely to obtain errors due to duplicate definitions of the added objects. To create an empty database without any local additions, copy data from template0 rather than template1. Example: +>``` +>CREATE DATABASE foo WITH TEMPLATE template0; +>``` +>- **gs\_restore** cannot import large objects selectively. For example, it can only import the objects of a specified table. If an archive contains large objects, all large objects will be imported, or none of them will be restored if they are excluded by using **-L**, **-t**, or other parameters. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>1. The **-d/--dbname** and **-f/--file** parameters do not coexist. +>2. The **-s/--schema-only** and **-a/--data-only** parameters do not coexist. +>3. The **-c/--clean** and **-a/--data-only** parameters do not coexist. +>4. When **--single-transaction** is used, **-j/--jobs** must be a single job. +>5. **--role** must be used in conjunction with **--rolepassword**. + +Connection parameters: + +- -h, --host=HOSTNAME + + Specifies the host name. If the value begins with a slash \(/\), it is used as the directory for the UNIX domain socket. The default value is taken from the _PGHOST_ environment variable. If it is not set, a UNIX domain socket connection is attempted. + + This parameter is used only for defining names of the hosts outside openGauss. The names of the hosts inside openGauss must be 127.0.0.1. + +- -p, --port=PORT + + TCP port or the local Unix-domain socket file extension on which the server is listening for connections. 
The default value is the _PGPORT_ environment variable. + + If the thread pool function is enabled, you are advised to use **pooler port**, that is, the listening port number plus 1. + +- -U, --username=NAME + + Specifies the user name to connect to. + +- -w, --no-password + + Never issues a password prompt. The connection attempt fails if the host requires password verification and the password is not provided in other ways. This parameter is useful in batch jobs and scripts in which no user password is required. + +- -W, --password=PASSWORD + + User password for database connection. If the host uses the trust authentication policy, the administrator does not need to enter the **-W** parameter. If the **-W** parameter is not provided and you are not a system administrator, **gs\_restore** will ask you to enter a password. + +- --role=ROLENAME + + Specifies a role name for the import operation. If this parameter is selected, the **SET ROLE** statement will be issued after **gs\_restore** connects to the database. It is useful when the authenticated user \(specified by **-U**\) lacks the permissions required by **gs\_restore**. This parameter allows the user to switch to a role with the required permissions. Some installations have a policy against logging in directly as the initial user. This parameter allows data to be imported without violating the policy. + +- --rolepassword=ROLEPASSWORD + + Role password. + + +## Example + +Special case: Execute the **gsql** tool. Run the following commands to import the **MPPDB\_backup.sql** file in the export folder \(in plain-text format\) generated by **gs\_dump**/**gs\_dumpall** to the **postgres** database: + +``` +gsql -d postgres -p 5432 -W Bigdata@123 -f /home/omm/test/MPPDB_backup.sql +SET +SET +SET +SET +SET +ALTER TABLE +ALTER TABLE +ALTER TABLE +ALTER TABLE +ALTER TABLE +CREATE INDEX +CREATE INDEX +CREATE INDEX +SET +CREATE INDEX +REVOKE +REVOKE +GRANT +GRANT +total time: 30476 ms +``` + +**gs\_restore** is used to import the files exported by **gs\_dump**. + +Example 1: Execute the **gs\_restore** tool to import the exported **MPPDB\_backup.dmp** file \(custom format\) to the **postgres** database. + +``` +gs_restore -W Bigdata@123 backup/MPPDB_backup.dmp -p 5432 -d postgres +gs_restore: restore operation successful +gs_restore: total time: 13053 ms +``` + +Example 2: Execute the **gs\_restore** tool to import the exported **MPPDB\_backup.tar** file \(.tar format\) to the **postgres** database. + +``` +gs_restore backup/MPPDB_backup.tar -p 5432 -d postgres +gs_restore[2017-07-21 19:16:26]: restore operation successful +gs_restore[2017-07-21 19:16:26]: total time: 21203 ms +``` + +Example 3: Execute the **gs\_restore** tool to import the exported **MPPDB\_backup** file \(directory format\) to the **postgres** database. + +``` +gs_restore backup/MPPDB_backup -p 5432 -d postgres +gs_restore[2017-07-21 19:16:26]: restore operation successful +gs_restore[2017-07-21 19:16:26]: total time: 21003 ms +``` + +Example 4: Execute the **gs\_restore** tool and run the following commands to import the **MPPDB\_backup.dmp** file \(in custom format\). Specifically, import all the object definitions and data in the **PUBLIC** schema. Existing objects are deleted from the target database before the import. If an existing object references to an object in another schema, you need to manually delete the referenced object first. 
+ +``` +gs_restore backup/MPPDB_backup.dmp -p 5432 -d postgres -e -c -n PUBLIC +gs_restore: [archiver (db)] Error while PROCESSING TOC: +gs_restore: [archiver (db)] Error from TOC entry 313; 1259 337399 TABLE table1 gaussdba +gs_restore: [archiver (db)] could not execute query: ERROR: cannot drop table table1 because other objects depend on it +DETAIL: view t1.v1 depends on table table1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. + Command was: DROP TABLE public.table1; +``` + +Manually delete the referenced object and create it again after the import is complete. + +``` +gs_restore backup/MPPDB_backup.dmp -p 5432 -d postgres -e -c -n PUBLIC +gs_restore[2017-07-21 19:16:26]: restore operation successful +gs_restore[2017-07-21 19:16:26]: total time: 2203 ms +``` + +Example 5: Execute the **gs\_restore** tool and run the following commands to import the **MPPDB\_backup.dmp** file \(in custom format\). Specifically, import only the definition of **table1** in the **PUBLIC** schema. + +``` +gs_restore backup/MPPDB_backup.dmp -p 5432 -d postgres -e -c -s -n PUBLIC -t table1 +gs_restore[2017-07-21 19:16:26]: restore operation successful +gs_restore[2017-07-21 19:16:26]: total time: 21000 ms +``` + +Example 6: Execute the **gs\_restore** tool and run the following commands to import the **MPPDB\_backup.dmp** file \(in custom format\). Specifically, import only the data of **table1** in the **PUBLIC** schema. + +``` +gs_restore backup/MPPDB_backup.dmp -p 5432 -d postgres -e -a -n PUBLIC -t table1 +gs_restore[2017-07-21 19:16:26]: restore operation successful +gs_restore[2017-07-21 19:16:26]: total time: 20203 ms +``` + +## Helpful Links + +[gs\_dump](gs_dump.md#EN-US_TOPIC_0250273517) and [gs\_dumpall](gs_dumpall.md#EN-US_TOPIC_0250273518) + diff --git a/content/en/docs/Administratorguide/log-overview.md b/content/en/docs/Administratorguide/log-overview.md new file mode 100644 index 000000000..21b096e1a --- /dev/null +++ b/content/en/docs/Administratorguide/log-overview.md @@ -0,0 +1,55 @@ +# Log Overview + +During database running, a large number of logs are generated, including write-ahead logs \(WALs, also called Xlogs\) for ensuring database security and reliability and run logs and operation logs for daily database maintenance. If the database is faulty, you can refer to these logs to locate the fault and restore the database. + +## Log Type + +The following table describes details about log types. + +**Table 1** Log types + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Type | Description |
+| ---- | ----------- |
+| System log | Logs generated during database running. They are used to record abnormal process information. |
+| Operation log | Logs generated when a client tool (such as gs_guc) is operating databases. |
+| Trace log | Logs generated after the database debug switch is enabled. They are used to analyze database exceptions. |
+| Black box log | Logs generated when the database system breaks down. You can analyze the process context when the fault occurs based on the heap and stack information in the logs to facilitate fault locating. A black box dumps stack, heap, and register information about processes and threads when a system breaks down. |
+| Audit log | Logs used to record some of the database user operations after the database audit function is enabled. |
+| WAL | Logs used to restore a damaged database. They are also called redo logs. You are advised to routinely back up WALs. |
+| Performance log | Logs used to record the status of physical resources and the performance of access to external resources (such as disks, OBS and Hadoop clusters). |
+
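+The run logs, operation logs, and performance logs described in this guide are all written under the directory pointed to by the **$GAUSSLOG** environment variable. The sketch below is only an orientation aid and assumes the default layout described in the following sections (operation logs under **bin**, performance logs under **gs_profile**); the exact layout may differ in your installation.
+
+```
+# Minimal sketch, assuming the defaults described in this guide.
+# $GAUSSLOG defaults to /var/log/gaussdb/<username>.
+echo "Log root: $GAUSSLOG"
+ls -l "$GAUSSLOG"             # run logs and per-component subdirectories
+ls -l "$GAUSSLOG/bin"         # operation logs written by client tools such as gs_guc
+ls -l "$GAUSSLOG/gs_profile"  # performance logs (postgresql-<creation time>.prf)
+```
+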
+ diff --git a/content/en/docs/Administratorguide/log-reference.md b/content/en/docs/Administratorguide/log-reference.md new file mode 100644 index 000000000..5445c73bb --- /dev/null +++ b/content/en/docs/Administratorguide/log-reference.md @@ -0,0 +1,15 @@ +# Log Reference + +- **[Log Overview](log-overview.md)** + +- **[System Logs](system-logs.md)** + +- **[Operation Logs](operation-logs.md)** + +- **[Audit Logs](audit-logs.md)** + +- **[WALs](wals.md)** + +- **[Performance Logs](performance-logs.md)** + + diff --git a/content/en/docs/Administratorguide/logical-backup-and-restoration.md b/content/en/docs/Administratorguide/logical-backup-and-restoration.md new file mode 100644 index 000000000..3831d99f8 --- /dev/null +++ b/content/en/docs/Administratorguide/logical-backup-and-restoration.md @@ -0,0 +1,9 @@ +# Logical Backup and Restoration + +- **[gs\_dump](gs_dump.md)** + +- **[gs\_dumpall](gs_dumpall.md)** + +- **[gs\_restore](gs_restore.md)** + + diff --git a/content/en/docs/Administratorguide/operation-logs.md b/content/en/docs/Administratorguide/operation-logs.md new file mode 100644 index 000000000..73ccbac0d --- /dev/null +++ b/content/en/docs/Administratorguide/operation-logs.md @@ -0,0 +1,30 @@ +# Operation Logs + +Operation logs are generated when database tools are used by a database administrator or invoked by a cluster. If the cluster is faulty, you can backtrack user operations on the database and reproduce the fault based on the operation logs. + +## Log Storage Directory + +The default path is _$GAUSSLOG_**/bin**. If the environmental variable _$GAUSSLOG_ does not exist or its value is empty, the log information generated for a tool will be displayed, but not recorded in the log file of the tool. + +The default value of _$GAUSSLOG_ is **/var/log/gaussdb/**_username_. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If a database is deployed using the OM script, the log path is **/var/log/gaussdb/**_username_. + +## Log Naming Rules + +The log file name format is as follows: + +- _tool name_**-**_log creation time_**.log** +- _tool name_**-**_log creation time_**-current.log** + +_tool name_**-**_log creation time_**.log** is a historical log file, and _tool name_**-**_log creation time_**-current.log** is a current log file. + +If the size of a log file exceeds 16 MB, the next time the tool is invoked, the log file is renamed in the historical log file name format, and a new log file is generated at the current time point. + +For example, **gs\_guc-2015-01-16\_183728-current.log** is renamed as **gs\_guc-2015-01-16\_183728.log**, and **gs\_guc-2015-01-17\_142216-current.log** is generated. + +## Maintenance Suggestions + +You are advised to dump expired logs periodically to save disk space and prevent important logs from being lost. + diff --git a/content/en/docs/Administratorguide/overview.md b/content/en/docs/Administratorguide/overview.md new file mode 100644 index 000000000..ed658dbb4 --- /dev/null +++ b/content/en/docs/Administratorguide/overview.md @@ -0,0 +1,103 @@ +# Overview + +For database security purposes, openGauss provides two backup types, multiple backup and restoration solutions, and data reliability assurance mechanisms. + +Backup and restoration can be logically or physically performed. + +- Logical backup and restoration: backs up data by logically exporting data. This method can dump data that is backed up at a certain time point, and restore data only to this backup point. 
A logical backup does not back up data processed between failure occurrence and the last backup. It applies to scenarios where data rarely changes. Such data damaged due to misoperation can be quickly restored using a logical backup. To restore all the data in a database through logical backup, rebuild a database and import the backup data. Logical backup is not recommended for databases requiring high data availability because it takes a long time for data restoration. Logical backup is a major approach to migrate and transfer data because it can be performed on any platform. +- Physical backup and restoration: copies physical files in the unit of disk blocks from the primary node to the standby node to back up a database. A database can be restored using backup files, such as data files and archive log files. Physical backup is usually used for full backup, quickly backing up and restoring data with low costs if properly planned. + + The two data backup and restoration solutions supported by openGauss are as follows. Methods for restoring data in case of an exception differ for different backup and restoration solutions. + + **Table 1** Comparison between logical and physical backup and restoration + + + + + + + + + + + + + + + + + + +

+| Backup Type | Application Scenario | Media | Advantage and Disadvantage |
+| ----------- | -------------------- | ----- | -------------------------- |
+| Logical backup and restoration | Small volume of data needs to be processed. Currently, it is used for the backup and restoration of one or more tables (a command sketch follows this table). | • Disk<br>• SSD | Backing up a small amount of data (for specified objects) is efficient and flexible.<br>For a large volume of data, backup requires a long period of time. |
+| Physical backup and restoration | Huge volume of data needs to be processed. It is mainly used for full backup and restoration as well as the backup and restoration of all WAL archive and run logs in the database. | • Disk<br>• SSD | Backing up a huge amount of data is efficient. |
+
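+As an illustration of the table-level logical backup mentioned above, a single table can be dumped with **gs_dump** (see the gs_dump section of this guide). This is a sketch only: the port number, table name, and output path are placeholders, not values taken from a real environment.
+
+```
+# Dump one table in plain-text format, assuming the server listens on
+# port 37300 and the table PUBLIC.my_table exists.
+gs_dump -p 37300 postgres -t PUBLIC.my_table -f backup/my_table.sql -F p
+```
+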
+ + While backing up and restoring data, take the following aspects into consideration: + + - Whether the impact of data backup on services is acceptable + - Database restoration efficiency + + To minimize the impact of database faults, try to minimize the restoration duration, achieving the highest restoration efficiency. + + - Data restorability + + Minimize data loss after the database is invalidated. + + - Database restoration cost + + There are many factors that need to be considered while you select a backup policy on the live network, such as backup objects, data volume, and network configuration. [Table 2](#en-us_topic_0237088826_en-us_topic_0100209712_table1179095017218) lists available backup policies and applicable scenarios for each backup policy. + + **Table 2** Backup policies and scenarios + + + + + + + + + + + + + + + + + + + +

+| Backup Policy | Key Performance Factor | Typical Data Volume | Performance Specifications |
+| ------------- | ---------------------- | ------------------- | -------------------------- |
+| Cluster backup | • Data amount<br>• Network configuration | Data volume: PB level<br>Object quantity: about 1 million | Backup:<br>• Data transfer rate on each host: 80 Mbit/s (NBU/EISOO+Disk)<br>• Disk I/O rate (SSD/HDD): about 90% |
+| Table backup | • Schema where the table to be backed up resides<br>• Network configuration (NBU) | Data volume: 10 TB level | Backup: depends on query performance rate and I/O rate<br>NOTE: For multi-table backup, the backup time is calculated as follows:<br>Total time = Number of tables x Starting time + Total data volume/Data backup speed<br>In the preceding information:<br>• The starting time of a disk is about 5s. The starting time of an NBU is longer than that of a disk (depending on the NBU deployment).<br>• The data backup speed is about 50 MB/s on a single node. (The speed is evaluated based on the backup of a 1 GB table from a physical host to a local disk.)<br>The smaller the table is, the lower the backup performance will be.<br>A worked example of this formula follows the table. |
+
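+As a worked example of the formula above, assume that 100 tables totaling 200 GB are backed up to a local disk; the table count and data volume are assumptions chosen only for the arithmetic, while the starting time (about 5s per table) and the backup speed (about 50 MB/s on a single node) are the figures given in the table.
+
+```
+Total time = Number of tables x Starting time + Total data volume / Data backup speed
+           = 100 x 5 s + (200 x 1024 MB) / (50 MB/s)
+           = 500 s + 4096 s
+           = 4596 s (about 77 minutes)
+```
+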
+ + diff --git a/content/en/docs/Administratorguide/performance-logs.md b/content/en/docs/Administratorguide/performance-logs.md new file mode 100644 index 000000000..591040a93 --- /dev/null +++ b/content/en/docs/Administratorguide/performance-logs.md @@ -0,0 +1,22 @@ +# Performance Logs + +Performance logs focus on the access performance of external resources. Performance logs are used to record the status of physical resources and the performance of access to external resources \(such as disks, OBS and Hadoop clusters\). When a performance issue occurs, you can locate the cause using performance logs, which greatly improves troubleshooting efficiency. + +## Log Storage Directory + +The performance logs of CNs and DNs are stored in the directories under _$GAUSSLOG_**/gs\_profile**. + +## Log Naming Rules + +The name format of CN and DN performance logs is: + +**postgresql-**_creation time_**.prf** + +By default, a new log file is generated at 0:00 every day, or when the latest log file exceeds 20 MB or a database instance \(CN or DN\) is restarted. + +## Log Content Description + +Content of a line in a CN or DN log: + +_Host name_+_Date_+_Time_+_Instance name_+_Thread number_+_Log content_ + diff --git a/content/en/docs/Administratorguide/physical-backup-and-restoration.md b/content/en/docs/Administratorguide/physical-backup-and-restoration.md new file mode 100644 index 000000000..79ead2beb --- /dev/null +++ b/content/en/docs/Administratorguide/physical-backup-and-restoration.md @@ -0,0 +1,5 @@ +# Physical Backup and Restoration + +- **[gs\_basebackup](gs_basebackup.md)** + + diff --git a/content/en/docs/Administratorguide/public_sys-resources/icon-caution.gif b/content/en/docs/Administratorguide/public_sys-resources/icon-caution.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Administratorguide/public_sys-resources/icon-danger.gif b/content/en/docs/Administratorguide/public_sys-resources/icon-danger.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Administratorguide/public_sys-resources/icon-note.gif b/content/en/docs/Administratorguide/public_sys-resources/icon-note.gif new file mode 100644 index 0000000000000000000000000000000000000000..6314297e45c1de184204098efd4814d6dc8b1cda GIT binary patch literal 394 zcmZ?wbhEHblx7fPSjxcg=ii?@_wH=jwxy=7CMGH-B`L+l$wfv=#>UF#$gv|VY%C^b zCQFtrnKN(Bo_%|sJbO}7RAORe!otL&qo<>yq_Sq+8Xqqo5h0P3w3Lvb5E(g{p01vl zxR@)KuDH0l^z`+-dH3eaw=XqSH7aTIx{kzVBN;X&hha0dQSgWuiw0NWUvMRmkD|> literal 0 HcmV?d00001 diff --git 
a/content/en/docs/Administratorguide/public_sys-resources/icon-notice.gif b/content/en/docs/Administratorguide/public_sys-resources/icon-notice.gif new file mode 100644 index 0000000000000000000000000000000000000000..86024f61b691400bea99e5b1f506d9d9aef36e27 GIT binary patch literal 406 zcmV;H0crk6Nk%w1VIu$@0J8u9|NsB@_xJDb@8;&_*4Ea}&d#;9wWXz{jEszHYim+c zQaU<1At50E0000000000A^8Le000gEEC2ui03!e%000R7038S%NU)&51O^i-Tu6`s z0)`MFE@;3YqD6xSC^kTNu_J>91{PH8XfZ(p1pp2-SU@u3#{mEUC}_}tg3+I#{z}{Ok@D_ZUDg- zt0stin4;pC8M{WLSlRH*1pzqEw1}3oOskyNN?j;7HD{BBZ*OEcv4HK!6Bk6beR+04 z&8}k>SkTusVTDmkyOz#5fCA$JTPGJVQvr3uZ?QzzPQFvD0rGf_PdrcF`pMs}p^BcF zKtKTd`0wipR%nKN&Wj+V}pX;WC3SdJV!a_8Qi zE7z`U*|Y^H0^}fB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Administratorguide/querying-status.md b/content/en/docs/Administratorguide/querying-status.md new file mode 100644 index 000000000..2eec11bae --- /dev/null +++ b/content/en/docs/Administratorguide/querying-status.md @@ -0,0 +1,106 @@ +# Querying Status + +## Background + +You can query the status of openGauss in openGauss. The query result shows whether openGauss or a single host is running properly. + +## Prerequisites + +openGauss has been started. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to query the openGauss status: + + ``` + gs_om -t status --detail + ``` + + [Table 1](#en-us_topic_0237088790_table9610118112610) describes parameters in the query result. + + To query the status of instances on a host and the status of other instances which form a primary/secondary relationship with instances on the host, add **-h** to the command. For example: + + ``` + gs_om -t status -h plat2 --detail + ``` + + **plat2** indicates the name of the host to be queried. + + To query the status of instances on the host you logged in to, run the following command: + + ``` + gs_om -t status -h plat1 + ``` + + +## Parameter Description + +**Table 1** Status description + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Field | Description | Value |
+| ----- | ----------- | ----- |
+| cluster_state | openGauss status, which indicates whether openGauss is running properly. | • Normal: openGauss is available and the data has redundancy backup. All the processes are running and the primary/standby relationship is normal.<br>• Unavailable: openGauss is unavailable.<br>• Degraded: openGauss is available, but faulty database nodes or faulty primary database nodes exist. |
+| node | Host name. | Specifies the name of the host where the instance is located. If multiple AZs exist, the AZ IDs will be displayed. |
+| node_ip | Host IP address. | Specifies the IP address of the host where the instance is located. |
+| instance | Instance ID. | Specifies the instance ID. |
+| state | Instance status. | • Primary: The instance is a primary instance.<br>• Standby: The instance is a standby instance.<br>• Secondary: The instance is a secondary instance.<br>• Pending: The instance is in the quorum phase.<br>• Unknown: The instance status is unknown.<br>• Down: The instance is down. |
+
+ +## Examples + +View openGauss status details, including instance status. + +``` +gs_om -t status --detail +[ Cluster State ] + +cluster_state : Normal +redistributing : No +current_az : AZ_ALL + +[ Datanode State ] + +node node_ip instance state | node node_ip instance state +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +1 pekpopgsci00235 10.244.62.204 6001 /opt/gaussdb/cluster/data/dn1 P Primary Normal | 2 pekpopgsci00238 10.244.61.81 6002 /opt/gaussdb/cluster/data/dn1 S Standby Normal +``` + diff --git a/content/en/docs/Administratorguide/risky-operations.md b/content/en/docs/Administratorguide/risky-operations.md new file mode 100644 index 000000000..1cbca475e --- /dev/null +++ b/content/en/docs/Administratorguide/risky-operations.md @@ -0,0 +1,98 @@ +# Risky Operations + +Perform operations strictly following instructions provided in the guide. Do not perform the following risky operations. + +[Table 1](#en-us_topic_0237088894_en-us_topic_0059777750_t725e2ece7f7a4d5f962b2c314e7e836b) describes forbidden operations during routine O&M. + +**Table 1** Forbidden operations + + + + + + + + + + + + + +

+| Forbidden Operation | Risk |
+| --- | --- |
+| Modify the file name, permission, or content, or delete any content in the data directory. | Serious errors occur on DNs and cannot be fixed. |
+| Delete database system catalogs or their data. | Service operations cannot be properly performed. |
+ +[Table 2](#en-us_topic_0237088894_en-us_topic_0059777750_tf7f1e2a0747f496baffa365814127a3c) describes risky operations during routine O&M. + +**Table 2** Risky operations + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Category | Risky Operation | Risk | Risk Level | Preventive Measure | Check Item |
+| --- | --- | --- | --- | --- | --- |
+| Database | Open a configuration file and manually modify the port number. | The database fails to be started or connected. | ▲▲▲▲▲ | Use the required tool to modify the port number. | None |
+| Database | Incautiously modify the content of the pg_hba.conf file. | The client fails to be connected. | ▲▲▲▲▲ | Strictly follow the instructions provided in the product documentation when you modify this file. | None |
+| Database | Manually modify the pg_xlog file. | Database startup fails and data becomes inconsistent. | ▲▲▲▲▲ | Use the required tool to modify this file. | None |
+| Job | Run the kill -9 command to terminate a job process. | System resources occupied by this job cannot be released. | ▲▲▲ | Log in to the database and use the pg_terminate_backend or pg_cancel_backend function to terminate the job, or press Ctrl+C to terminate the job process. | Resource usage |
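+For example, rather than running kill -9 on a job process, the corresponding session can be ended from inside the database. The following is a minimal sketch; the process ID used here is a placeholder that would first be looked up in pg_stat_activity:
+
+```
+postgres=# SELECT datname, pid, state, query FROM pg_stat_activity WHERE state = 'active';
+postgres=# SELECT pg_cancel_backend(140123456789);    -- cancels only the running statement
+postgres=# SELECT pg_terminate_backend(140123456789); -- terminates the whole session
+```
+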
+ diff --git a/content/en/docs/Administratorguide/routine-maintenance-check-items.md b/content/en/docs/Administratorguide/routine-maintenance-check-items.md new file mode 100644 index 000000000..7068554f2 --- /dev/null +++ b/content/en/docs/Administratorguide/routine-maintenance-check-items.md @@ -0,0 +1,170 @@ +# Routine Maintenance Check Items + +## Checking openGauss Status + +openGauss provides tools to check database and instance status, ensuring that databases and instances are running properly to provide data services. + +- Check instance status. + + ``` + gs_check -U omm -i CheckClusterState + ``` + +- Check parameters. + + ``` + postgres=# SHOW parameter_name; + ``` + +- Modify parameters. + + ``` + gs_guc reload -D /gaussdb/data/dbnode -c "paraname=value" + ``` + + +## Checking Lock Information + +The lock mechanism is an important method to ensure data consistency. Information check helps learn database transactions and database running status. + +- Query lock information in the database. + + ``` + postgres=# SELECT * FROM pg_locks; + ``` + +- Query the status of threads waiting to acquire locks. + + ``` + postgres=# SELECT * FROM pg_thread_wait_status WHERE wait_status = 'acquire lock'; + ``` + +- Query the status of events waiting to acquire locks. + + ``` + postgres=# SELECT node_name, thread_name, tid, wait_status, query_id FROM pgxc_thread_wait_status WHERE wait_status = 'acquire lock'; + ``` + +- Kill a system process. + + Search for a system process that is running and run the following command to end the process: + + ``` + ps ux + kill -9 pid + ``` + + +## Collecting Event Statistics + +Long-time running of SQL statements will occupy a lot of system resources. You can check event occurrence time and occupied memory to learn about database running status. + +- Query the time points about an event. + + Run the following command to query the thread start time, transaction start time, SQL start time, and status change time of the event: + + ``` + postgres=# SELECT backend_start,xact_start,query_start,state_change FROM pg_stat_activity; + ``` + +- Query the number of sessions on the current server. + + ``` + postgres=# SELECT count(*) FROM pg_stat_activity; + ``` + +- Collect system-level statistics. + + Run the following command to query information about the session that uses the maximum memory: + + ``` + postgres=# SELECT * FROM pv_session_memory_detail() ORDER BY usedsize desc limit 10; + ``` + + +## Checking Objects + +Tables, indexes, partitions, and constraints are key storage objects of a database. A database administrator needs to routinely maintain key information and these objects. + +- View table details. + + ``` + postgres=# \d+ table_name + ``` + +- Query table statistics. + + ``` + postgres=# SELECT * FROM pg_statistic; + ``` + +- View index details. + + ``` + postgres=# \d+ index_name + ``` + +- Query partitioned table information. + + ``` + postgres=# SELECT * FROM pg_partition; + ``` + +- Collect statistics. + + Run the **ANALYZE** statement to collect related statistics on the database. + + Run the **VACUUM** statement to reclaim space and update statistics. + +- Query constraint information. + + ``` + postgres=# SELECT * FROM pg_constraint; + ``` + + +## Checking an SQL Report + +Run the **EXPLAIN** statement to view execution plans. + +## Backing Up Data + +Never forget to back up data. During the routine work, the backup execution and backup data validity need to be checked to ensure data security and encryption security. + +- Export a specified user. 
+ + ``` + gs_dump dbname -p port -f out.sql -U user_name -W password + ``` + +- Export a schema. + + ``` + gs_dump dbname -p port -n schema_name -f out.sql + ``` + +- Export a table. + + ``` + gs_dump dbname -p port -t table_name -f out.sql + ``` + + +## Checking Basic Information + +Basic information includes versions, components, and patches. Periodic database information checks and records are important for database life cycle management. + +- Check version information. + + ``` + postgres=# SELECT version(); + ``` + +- Check table size and database size. + + ``` + postgres=# SELECT pg_table_size('table_name'); + postgres=# SELECT pg_database_size('database_name'); + ``` + + diff --git a/content/en/docs/Administratorguide/routine-maintenance.md b/content/en/docs/Administratorguide/routine-maintenance.md new file mode 100644 index 000000000..8430ed3e0 --- /dev/null +++ b/content/en/docs/Administratorguide/routine-maintenance.md @@ -0,0 +1,23 @@ +# Routine Maintenance + +- **[Routine Maintenance Check Items](routine-maintenance-check-items.md)** + +- **[Checking OS Parameters](checking-os-parameters.md)** + +- **[Checking openGauss Health Status](checking-opengauss-health-status.md)** + +- **[Checking Database Performance](checking-database-performance.md)** + +- **[Checking and Deleting Logs](checking-and-deleting-logs.md)** + +- **[Checking Time Consistency](checking-time-consistency.md)** + +- **[Checking the Number of Application Connections](checking-the-number-of-application-connections.md)** + +- **[Routinely Maintaining Tables](routinely-maintaining-tables.md)** + +- **[Routinely Recreating an Index](routinely-recreating-an-index.md)** + +- **[Data Security Maintenance Suggestions](data-security-maintenance-suggestions.md)** +To ensure data security in GaussDB Kernel and prevent accidents, such as data loss and illegal data access, read this section carefully. + diff --git a/content/en/docs/Administratorguide/routinely-maintaining-tables.md b/content/en/docs/Administratorguide/routinely-maintaining-tables.md new file mode 100644 index 000000000..dd0707b64 --- /dev/null +++ b/content/en/docs/Administratorguide/routinely-maintaining-tables.md @@ -0,0 +1,106 @@ +# Routinely Maintaining Tables + +To ensure proper database running, after insert and delete operations, you need to routinely run **VACUUM FULL** and **ANALYZE** as appropriate for customer scenarios and update statistics to obtain better performance. + +## Related Concepts + +You need to routinely run **VACUUM**, **VACUUM FULL**, and **ANALYZE** to maintain tables, because: + +- **VACUUM FULL** can be used to reclaim disk space occupied by updated or deleted data and combine small-size data files. +- **VACUUM** can be used to maintain a visualized mapping for each table to track pages that contain arrays visible to other active transactions. A common index scan uses the mapping to obtain the corresponding arrays and check whether the arrays are visible to the current transaction. If the arrays cannot be obtained, capture a batch of arrays to check the visibility. Therefore, updating the visualized mapping of a table can accelerate unique index scans. +- Running **VACUUM** can avoid original data loss caused by duplicate transaction IDs when the number of executed transactions exceeds the database threshold. +- **ANALYZE** can be used to collect statistics on tables in databases. The statistics are stored in the system catalog **PG\_STATISTIC**. 
Then the query optimizer uses the statistics to work out the most efficient execution plan. + +## Procedure + +1. Run the **VACUUM** or **VACUUM FULL** command to reclaim disk space. + - **VACUUM**: + + Run **VACUUM** for a table. + + ``` + postgres=# VACUUM customer; + ``` + + ``` + VACUUM + ``` + + This statement can be concurrently executed with database operation commands, including **SELECT**, **INSERT**, **UPDATE**, and **DELETE**; excluding **ALTER TABLE**. + + Run **VACUUM** for the table partition. + + ``` + postgres=# VACUUM customer_par PARTITION ( P1 ); + ``` + + ``` + VACUUM + ``` + + - **VACUUM FULL**: + + ``` + postgres=# VACUUM FULL customer; + ``` + + ``` + VACUUM + ``` + + During the command running, exclusive locks need to be added to the table and all other database operations need to be suspended. + +2. Run **ANALYZE** to update statistics. + + ``` + postgres=# ANALYZE customer; + ``` + + ``` + ANALYZE + ``` + + Run **ANALYZE VERBOSE** to update statistics and display table information. + + ``` + postgres=# ANALYZE VERBOSE customer; + ``` + + ``` + ANALYZE + ``` + + You can run **VACUUM ANALYZE** at the same time to optimize the query. + + ``` + postgres=# VACUUM ANALYZE customer; + ``` + + ``` + VACUUM + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**VACUUM** and **ANALYZE** cause a substantial increase in I/O traffic, which may affect other active sessions. Therefore, you are advised to set the cost-based vacuum delay feature by specifying the **vacuum\_cost\_delay** parameter. For details, see "GUC Parameters \> Resource Consumption \> Cost-based Vacuum Delay" in the _Developer Guide_. + +3. Delete a table. + + ``` + postgres=# DROP TABLE customer; + postgres=# DROP TABLE customer_par; + postgres=# DROP TABLE part; + ``` + + If the following information is displayed, the tables have been deleted: + + ``` + DROP TABLE + ``` + + +## Maintenance Suggestions + +- Routinely run **VACUUM FULL** for large tables. If the database performance deteriorates, run **VACUUM FULL** for the entire database. If the database performance is stable, you are advised to run **VACUUM FULL** monthly. +- Routinely run **VACUUM FULL** on system catalogs, especially **PG\_ATTRIBUTE**. +- Enable automatic vacuum processes \(**AUTOVACUUM**\) in the system. The processes automatically run the **VACUUM** and **ANALYZE** statements to reclaim the record space marked as the deleted state and update statistics in the table. + diff --git a/content/en/docs/Administratorguide/routinely-recreating-an-index.md b/content/en/docs/Administratorguide/routinely-recreating-an-index.md new file mode 100644 index 000000000..4ad70ca62 --- /dev/null +++ b/content/en/docs/Administratorguide/routinely-recreating-an-index.md @@ -0,0 +1,64 @@ +# Routinely Recreating an Index + +## Background + +When data deletion is repeatedly performed in the database, index keys will be deleted from the index pages, resulting in index bloat. Recreating an index routinely improves query efficiency. + +The database supports B-tree indexes. Recreating a B-tree index routinely helps improve query efficiency. + +- If a large amount of data is deleted, index keys on the index pages will be deleted. As a result, the number of index pages reduces and index bloat occurs. Recreating an index helps reclaim wasted space. +- In a newly created index, pages with adjacent logical structures tend to have adjacent physical structures. 
Therefore, a new index achieves a higher access speed than an index that has been updated for multiple times. + +## Methods + +Use either of the following two methods to recreate an index: + +- Run the **DROP INDEX** statement to delete the index and run the **CREATE INDEX** statement to create an index. + + When you delete an index, a temporary exclusive lock is added in the parent table to block related read/write operations. During index creation, the write operation is locked, whereas the read operation is not locked and can use only sequential scans. + +- Run **REINDEX** to recreate an index. + - When you run the **REINDEX TABLE** statement to recreate an index, an exclusive lock is added to block related read/write operations. + - When you run the **REINDEX INTERNAL TABLE** statement to recreate an index for a **desc** table \(such as column-store **cudesc** table\), an exclusive lock is added to block related read/write operations on the table. + + +## Procedure + +Assume the ordinary index **areaS\_idx** exists in the **area\_id** column of the imported table **areaS**. Use either of the following two methods to recreate an index: + +- Run the **DROP INDEX** statement to delete the index and run the **CREATE INDEX** statement to create an index. + 1. Delete the index. + + ``` + postgres=# DROP INDEX areaS_idx; + DROP INDEX + ``` + + 2. Create an index + + ``` + postgres=# CREATE INDEX areaS_idx ON areaS (area_id); + CREATE INDEX + ``` + + +- Run **REINDEX** to recreate an index. + - Run **REINDEX TABLE** to recreate an index. + + ``` + postgres=# REINDEX TABLE areaS; + REINDEX + ``` + + - Run **REINDEX INTERNAL TABLE** to recreate an index for a **desc** table \(such as column-store **cudesc** table\). + + ``` + postgres=# REINDEX INTERNAL TABLE areaS; + REINDEX + ``` + + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>Before you recreate an index, you can increase the values of **maintenance\_work\_mem** and **psort\_work\_mem** to accelerate the index recreation. + diff --git a/content/en/docs/Administratorguide/starting-and-stopping-opengauss.md b/content/en/docs/Administratorguide/starting-and-stopping-opengauss.md new file mode 100644 index 000000000..f8b5c8c3e --- /dev/null +++ b/content/en/docs/Administratorguide/starting-and-stopping-opengauss.md @@ -0,0 +1,62 @@ +# Starting and Stopping openGauss + +## Starting openGauss + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to start openGauss: + + ``` + gs_om -t start + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >An HA cluster must be started in HA mode. If the cluster is started in standalone mode, you need to restore the HA relationship by running the **gs\_ctl build** command. For details about how to use the **gs\_ctl** tool, see the _openGauss Tool Reference_. + + +## Stopping openGauss + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to stop openGauss: + + ``` + gs_om -t stop + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >For details about how to start and stop nodes and availability zones \(AZs\), see "Server Tools \> gs\_om" in the _openGauss Tool Reference_. + + +## Examples + +Start openGauss: + +``` +gs_om -t start +Starting cluster. +========================================= +========================================= +Successfully started. + +``` + +Stop openGauss: + +``` +gs_om -t stop +Stopping cluster. 
+========================================= +Successfully stopped cluster. +========================================= +End stop cluster. +``` + +## Troubleshooting + +If starting or stopping openGauss fails, troubleshoot the problem based on log information. For details, see [Log Reference](log-reference.md). + +If the startup fails due to timeout, you can run the following command to set the startup timeout interval, which is 300s by default: + +``` +gs_om -t start --time-out=300 +``` + diff --git a/content/en/docs/Administratorguide/system-logs.md b/content/en/docs/Administratorguide/system-logs.md new file mode 100644 index 000000000..863e8fb8c --- /dev/null +++ b/content/en/docs/Administratorguide/system-logs.md @@ -0,0 +1,25 @@ +# System Logs + +System logs include those generated by database nodes when openGauss is running, and those generated when openGauss is deployed. If an error occurs during openGauss running, you can locate the cause and troubleshoot it based on system logs. + +## Log Storage Directory + +Run logs of database nodes are stored in the corresponding folders in the **/var/log/gaussdb/**_username_**/pg\_log** directory. + +Logs generated during OM openGauss installation and uninstallation are stored in the **/var/log/gaussdb/**_username_**/om** directory. + +## Log Naming Rules + +The name format of database node run logs is: + +**postgresql-**_creation time_**.log** + +By default, a new log file is generated at 0:00 every day, or when the latest log file exceeds 16 MB or a database instance \(database node\) is restarted. + +## Log Content Description + +- Content of a line in a database node log: + + Date+Time+Time zone+Username+Database name+Session ID+Log level+Log content + + diff --git a/content/en/docs/Administratorguide/wals.md b/content/en/docs/Administratorguide/wals.md new file mode 100644 index 000000000..e4dd333b8 --- /dev/null +++ b/content/en/docs/Administratorguide/wals.md @@ -0,0 +1,29 @@ +# WALs + +In a system using write-ahead logs \(WALs or Xlogs\), all data file modifications are written to a log before they are applied. That is, the corresponding log must be written into a permanent memory before a data file is modified. You can use WALs to restore the cluster if the system crashes. + +## Log Storage Directory + +Take a DN as an example. Its WALs are stored in the **/gaussdb/data/data\_dn****/pg\_xlog** directory. + +**/gaussdb/data/data\_dn** is the data directory of a node in the cluster. + +## Log Naming Rules + +Log files are saved as segment files. Each segment is 16 MB and is divided into multiple 8 KB pages. The name of a WAL file consists of 24 hexadecimal characters. Each name has three parts, with each part having eight hexadecimal characters. The first part indicates the time line, the second part indicates the log file identifier, and the third part indicates the file segment identifier. A time line starts from 1, and a log file identifier and a file segment identifier start from 0. + +For example, the name of the first transaction log is **000000010000000000000000**. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>The numbers in each part are used in ascending order in succession. Exhausting all available numbers takes a long time, and the numbers will start from zero again after they reach the maximum. + +## Log Content Description + +The content of WALs depends on the types of recorded transactions. WALs can be used to restore a system after the system breaks down. 
+ +By default, GaussDB Kernel reads WALs for system restoration during each startup. + +## Maintenance Suggestions + +WALs are important for database restoration. You are advised to routinely back up WALs. + diff --git a/content/en/docs/Compilationguide/Compilation.md b/content/en/docs/Compilationguide/Compilation.md new file mode 100644 index 000000000..ac05d861b --- /dev/null +++ b/content/en/docs/Compilationguide/Compilation.md @@ -0,0 +1,12 @@ +# Compilation Guide + +## Purpose + +This document helps you quickly understand the software and hardware requirements, environment configuration, and how to compile software or installation packages from the source code. + +## Overview + +This document describes the requirements of openGauss for the operating system, compilation environment, software dependency, compilation method, and storage location of compilation results. + + + diff --git a/content/en/docs/Compilationguide/compiling-open-source-software.md b/content/en/docs/Compilationguide/compiling-open-source-software.md new file mode 100644 index 000000000..61872e5ed --- /dev/null +++ b/content/en/docs/Compilationguide/compiling-open-source-software.md @@ -0,0 +1,76 @@ +# Compiling Open-source Software + +## Compiling Open-source Software + +Before compiling the openGauss, compile and build the open-source and third-party software on which the openGauss depends. These open-source and third-party software is stored in the openGauss-third\_party code repository and usually needs to be built only once. If the open-source software is updated, rebuild the software. + +Since this step takes a long time, we have compiled and built **binarylibs** using **openGauss-third\_party** and compress and upload **binarylibs** to the Internet. You can download the compressed package by referring to [Downloading Code](downloading-code.md). + +**Table 1** Requirements for the openGauss open-source and third-party software before compilation + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Software | Recommended Version |
+| --- | --- |
+| python3 | 3.6 |
+| python3-dev | 3.x |
+| pam-devel | 1.1.8-1.3.1 |
+| ncurses-devel | 5.9-13.20130511 |
+| libffi-dev | 3.1 |
+| patch | 2.7.1-10 |
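+As a rough pre-check, these packages can be queried or installed with the system package manager. The following is a minimal sketch for a yum-based OS such as CentOS 7.6; the exact package names (for example, python3-devel instead of python3-dev) vary by distribution and are assumptions here:
+
+```
+yum install -y python3 python3-devel pam-devel ncurses-devel libffi-devel patch
+python3 --version    # expect a 3.6.x version
+```
+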
+ +Run the following commands to go to the directory of the open-source and third-party software on which the kernel depends, compile and build the open-source and third-party software, and generate binary programs or library files. **/sda/openGauss-third\_party** is the directory for downloading open-source third-party software. + +``` +[user@linux sda]$ cd /sda/openGauss-third_party/build +[user@linux sda]$ sh build.sh +``` + +After the preceding commands are executed, the open-source and third-party software required for openGauss compilation is automatically generated. To generate any open-source and third-party software independently, go to the corresponding directory and run the **build.sh** script. For example: + +``` +[user@linux sda]$ cd /sda/openGauss-third_party/dependency/openssl +[user@linux sda]$ sh build.sh +``` + +The OpenSSL is generated. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>For error logs, you can view the corresponding log in the build directory and the log in the corresponding module. For example, you can view the OpenSSL compilation and installation logs in the **dependency** module. +>- /sda/openGauss-third\_party/build/dependency\_build.log +>- /sda/openGauss-third\_party/dependency/build/openssl\_build.log +>- /sda/openGauss-third\_party/dependency/openssl/build\_openssl.log + +## Compilation and Build Result + +After the preceding script is executed, the final compilation and build result is stored in the **binarylibs** directory at the same level as **openGauss-third\_party**. These files will be used during the compilation of **openGauss-server**. + diff --git a/content/en/docs/Compilationguide/compiling-the-installation-package.md b/content/en/docs/Compilationguide/compiling-the-installation-package.md new file mode 100644 index 000000000..c2304cf59 --- /dev/null +++ b/content/en/docs/Compilationguide/compiling-the-installation-package.md @@ -0,0 +1,47 @@ +# Compiling the Installation Package + +To compile the installation package is to compile the code and generate the software installation package. The compilation and packaging process of the installation package is also integrated in **build.sh**. + +## Prerequisites + +- The software and hardware have been prepared based on the requirements for setting up the compilation environment, and the code has been downloaded by referring to [Downloading Code](downloading-code.md). +- The open-source software has been compiled and built. For details, see [Compiling Open-source Software](compiling-open-source-software.md). +- You are familiar with the parameter options and functions of the **[build.sh](introduction-to-build-sh.md)** script. +- The code environment is clean, and no file is generated before the current compilation. For details, see [FAQ 4.1](how-do-i-delete-temporary-files-generated-during-compilation.md). + +## Procedure + +1. Run the following command to go to the code directory: + + ``` + [user@linux sda]$ cd /sda/openGauss-server + ``` + +2. Run the following command to compile the openGauss installation package: + + ``` + [user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] -pkg + ``` + + For example: + + ``` + sh build.sh -pkg # Generate the openGauss installation package of the release version. binarylibs or its soft link must exist in the code directory. Otherwise, the operation fails. + sh build.sh -m debug -3rd /sdc/binarylibs -pkg # Generate the openGauss installation package of the debug version. 
+ ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >You can add the **--no\_om\_adapt** option to specify that the installation package does not adapt to the OM. + >This step involves the process of generating software by one-click compilation and the process of encapsulating the software into an installation package. Compared with the **build.sh** command in [Software Compilation and Installation](software-compilation-and-installation.md), only the **-pkg** option is added. + +3. If the following information is displayed, the installation package compilation is successful: + + ``` + success! + ``` + + - The generated installation package is stored in the **./package** directory. + - Compilation log: make\_compile.log + - Installation package packaging log: ./package/make\_package.log + + diff --git a/content/en/docs/Compilationguide/compiling-the-version.md b/content/en/docs/Compilationguide/compiling-the-version.md new file mode 100644 index 000000000..07caad2ed --- /dev/null +++ b/content/en/docs/Compilationguide/compiling-the-version.md @@ -0,0 +1,15 @@ +# Compiling the Version + +A **build.sh** script is provided for compiling openGauss and generating the installation package. You can compile openGauss by using the script. You can also configure environment variables and run commands to compile openGauss. + +This section describes the prerequisites and procedure for openGauss compilation. The following figure shows the compilation process. + +![](figures/绘图1.png) + +- **[Preparation Before Compiling](preparation-before-compiling.md)** + +- **[Software Compilation and Installation](software-compilation-and-installation.md)** + +- **[Compiling the Installation Package](compiling-the-installation-package.md)** + + diff --git a/content/en/docs/Compilationguide/configuring-environment-variables.md b/content/en/docs/Compilationguide/configuring-environment-variables.md new file mode 100644 index 000000000..5ec6c0e92 --- /dev/null +++ b/content/en/docs/Compilationguide/configuring-environment-variables.md @@ -0,0 +1,6 @@ +# Configuring Environment Variables + +The environment variable configuration for compiling the openGauss has been written into the one-click compilation and packaging script. Therefore, you do not need to configure the environment variables. + +If you do not want to use the one-click compilation script, you need to manually configure environment variables. For details, see [Software Compilation and Installation](software-compilation-and-installation.md). + diff --git a/content/en/docs/Compilationguide/downloading-code.md b/content/en/docs/Compilationguide/downloading-code.md new file mode 100644 index 000000000..67d3535d7 --- /dev/null +++ b/content/en/docs/Compilationguide/downloading-code.md @@ -0,0 +1,25 @@ +# Downloading Code + +## Prerequisites + +The git and git-lfs have been installed and configured on the local host. + +## Procedure + +1. Run the following command to download the code and open-source and third-party software repository. _\[git ssh address\]_ indicates the actual code download address. You can obtain them from the openGauss community. + + ``` + [user@linux sda]$ git clone [git ssh address] openGauss-server + [user@linux sda]$ git clone [git ssh address] openGauss-third_party + [user@linux sda]$ # mkdir binarylibs For details about this comment, see the following note. + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- **openGauss-server**: openGauss code repository. 
+ >- **openGauss-third\_party**: open-source third-party software repository on which openGauss depends. + >- **binarylibs**: package for storing the built open-source third-party software. You can obtain the package by referring to [Compiling Open-source Software](compiling-open-source-software.md). Since compiling and building open-source software take a long time, we have compiled and built **binarylibs** using **openGauss-third\_party** and compress and upload **binarylibs** to the Internet. + > You can download the compressed package by visiting **https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.0.0/openGauss-third\_party\_binarylibs.tar.gz**. + > After the download is complete, decompress the package and rename the folder to **binarylibs**. + +2. When the progress of each download reaches 100%, the download is successful. + diff --git a/content/en/docs/Compilationguide/faqs.md b/content/en/docs/Compilationguide/faqs.md new file mode 100644 index 000000000..981a25985 --- /dev/null +++ b/content/en/docs/Compilationguide/faqs.md @@ -0,0 +1,11 @@ +# FAQs + +- **[How Do I Delete Temporary Files Generated During Compilation?](how-do-i-delete-temporary-files-generated-during-compilation.md)** + +- **[How Do I Resolve the "Configure error: C compiler cannot create executables" Error?](how-do-i-resolve-the-configure-error-c-compiler-cannot-create-executables-error.md)** + +- **[How Do I Handle the "g++: fatal error: Killed signal terminated program cclplus" Error?](how-do-i-handle-the-g++-fatal-error-killed-signal-terminated-program-cclplus-error.md)** + +- **[How Do I Handle the "out of memory allocating xxx bytes after a total of xxx bytes" Error?](how-do-i-handle-the-out-of-memory-allocating-xxx-bytes-after-a-total-of-xxx-bytes-error.md)** + + diff --git "a/content/en/docs/Compilationguide/figures/\347\273\230\345\233\2761.png" "b/content/en/docs/Compilationguide/figures/\347\273\230\345\233\2761.png" new file mode 100644 index 0000000000000000000000000000000000000000..08ff6b86e4c81bd5e3ddc7a460515e61d42a7818 GIT binary patch literal 17768 zcmc({WmuG5+ctbsf}n&-NK2|9pp-Dut%OKRN#_8Pk^?9LB7G@JN=SDM9Rd;}ARPmO zgyhgU#C&VKuKT{9_j|YP`}2Iy`~F}XIA^VM)lvI?9EPnYM=}937x~!rs{{({Y zX~2&V$p!F4hW*(F__*kzY~Th#zF) zFB*cu@jE1!6_sLz1>cZe{ys)B*(_E1ZFTkOMv=JA5?{&E{1>@`)t_#XRsO3(OH;lZ zH}?1U@gYbcy}KIwxemw^!Ao4gPdc=sAuMhhJ&83aAM6hZ`=zitJc z=;X}sAn0v+DQMX7Kh^v=z`)=m$GeHk^RRdC-X#j!M3IXvP*yq5{PetE`l=r<(2s5D z!+b4O#9~iEL1Ce4PXd266DjYU8^oH{CHDJ8F>ZF}3jYUY`aic1K`jo`bsm|BXsrA=f}EvRT8j5JgKN_5e%v z3G_I&V`HeCHJk$_r6Z-z@vqUf|f7V{PlcJfiyw%@S(e zAmJ44Bxz?({_ve6KM->gHxa;sXoh+Z4J{r*0%I#RH|W{Q0ZpR81=n;tDP+(*rv>o)oxj+UCW z?a2X2LVmC)`1R@XL`4o}e=TPz)mI&X8A{_&QDS`l zDoT9yX5+!6!_#uhaE?5ccrJP&+n749O(#JkNPsJ>6BANb8tC<4yZx#-W;rvny7_eH zk^k{x!o_L=NQw+I5e`m0xan2?j4*ss`zp8k@Nk|M(D0^L(K7)jXz{r)c3yd6Xo75R zS)L8o^jz`6mn@ZdZ4;B!($f2*JaP z7cQ03Q9~cD?oma6! 
zAv)?k2VHQkBt1Hd!3({jGnGvE(1JE40s#a7SX?drVgsE0l4gEoHT(bwRZPikJwwBS zF%w@kL!{$KoyVHHQN5>6efFfMe)?FWQaaLUBBR*2KHs?BQ~YqMX?3c$0$mEdP^K)w zNueyMA{A9hq2rr__30e)POsq@?54OeVqVjJU@jEd)>6Y?7^EG} z(O^+a5;0a#khr5n%$nBf+M5q_ayw`L$m7UvF<(^J?KgY=iyZ?dZ_6OG%dG77*M$7T z34dYlxgz@g{YdeGv5#PTW4HrO8@uIgX}aEAqblos$^qMG?a9v{wf+4aJ6Jg*p{n^y zOD%~x&#pNs51V)#2EH06Cx+gVj)W;ts(uiVq1KRefL)>!INr#+eSG{(htJdrb`A2T zeE;{Jx5idmX=U3kL^fMyv9U%yD3@kOY(ILZ_OTT&MVjR3_^>nbv06wwc1$Bg5e_d} zT|LPEKp;%;`DpG@l}*3o$mr-KIT7@hWF#yktr4^8Ab$QTZJeAy>_^jCcz$7_Wh5CZ zVQ^jQlR|L#M>q}DasRCW4-I0xfA+RwZ$Qb=F%*xr(_Dis>};tzo@DT1Z&=8}=ELW} z-q)RXWIBSDwm(mAHtzO`qs*W{Dj@!+0Uks1lT{W9y@B=<TB~`c+X^<(?t#Avq%J4n0$+EH+pq3?VA?_MR{Y zE%TW}6zK1|R@Qlqj_&`Pu`Y<{G(I{Nvg;-%(?WVGL%E7hv`n1+iUg)Se&qQHZ()VjJ?=%zCB+Krn` zwD7}7>YjLHy~7xHqS*YGth@3DOm|(~Gc(vm7YT~BC#JPLTEm$brkcw<{VUHb{o5rw z-48n@tDTI+ZCCxh>@3ijze;5C(GY8)tf2X?L(ayOM4uVsMA2$Cpr&;Xg-3LJHaC;B zeNM6zQA*kDY~MC`N8HHLM%Kmp@7sGVm&Uz9zmIBG0qrg$-(>~8e61XZI|h!G1+6Eh z0~MbPDwZY7?rP9UA;0*CV_a@)`tJVLwf#9oDzWDh;*&N#HsT4zsM6{Fyae>%M9Np0BJutv9PMF~ZcnCg&MwsHMKBM3nlpg>Gg>6v>+3fUToc_wBU-rV zfayAS+Mrk+T8@H+S2@-2$Z>DGYl0hg zMjtHSozEbCOe5rwJz&wH5@%B4SEX6ct{4^N#Blsb@?J)ax@F>xb5r>RrAFfu@=5)A z6MlZf!W?r&t*KQ_eGV0$^V;TRaCB8U#p)qBw07v%y>(vpwQJxYzRLk-S_5qQZjR;N6&0=FpH`M{J}0QO+-5`P74$4S zMjz#UbewuBt|?biXC!FD=+_imp*@!)lI)nU{2{7}L4+avYG8B7FE3?MO|u)qshy;g zNh{nodzd_+J6RrU_Vj0gomk3FtiII#!k2k^%s$JfM|=#qGOMJZDwesg z`sI&0jTc5%t?m0P79@&g8i7**Ya4!c`Em{lPnyjPsA?{0#HWb2`0L|MMPG8ihO#uU z_&+cyhld8hIIo~K-n?6TTf{j~vGg%6YodaAv^&S$)1&Y!!Ql+2%4J2{Oh(FRiJq1@?Eo4-Vl%mOUFx_Yb)$_Uy#SQa0( zKvn7hh0)P6{yexXx&lszp|TO24}-&)1JD&tIQtVqu*E}dg7hr&lww>Nn4^yG6Y%K%>Q4`6fJwyW zPveZsX+`?=uP~P%l)`5+TEK9KEC!{VVJLp6VMQc;>eqoUsISo0yJTy0Q-zo;YF=YG zw}+TqSIy&Haq~w0vPEW-#H`ThY}(}!oRm1y-u;DCDXu066d+SGqoVm-tud&fF0Rmj z$4^8Yi#TriqB7=rv(Y~;QP`pQqu{rA=1Mx0(|SlN*OZ9L*E7D}<-Z2P$RbYG-nG8W zsct$vR2yjhtYjA3*F0 zbX8n2&26FTgA0no*bT0-H`^1|Ius$a`EhBlb(bvTjDkrwV`Eyw`SDw^X>zD_JqCmE zQOU1nQf~k2mX@}zZgzS3J+;&a6Z-W5EM+%&d57Z?^ynScVdX3z&qjUY$S6m)xce~?!3l_8hCJ#^oX|dI%0k^nq)5N5+vOO z+|j3J%HLOH4Y+8A%{BQTGuqejBtTE`?S8){W-tzBjNBH->k8@G^F3|;WK1b2#7^_% z{_U^Yb0x9A6hA0lFZWJ?Sl5Atr82zrkks)r2^6S?8IaNI2wzud!fYaYSy{*=`S*!?;qziag=dto5w@7t9dXFVsaPRj|d750t(tY4P>M|?07e^0=NAhs5W+4cC5i4aqk0pXHJkZvRGdP|E&SHFk$lrTbxFYkvBJe;hdWB-85Di^j~?Fxt$6izFZ z9`k&)G>?u2cGoS_f@^iv)8QWj&ivt=SY;5n6*bfbPF@{E9iz_mn}IXHnIxb%dvk+h zRi16ujWv57BO_>$od-BaAX6_F`YsoEDd$3c=4q}c+7Ez{A6UYG#+-RFnsLiw96l+) z$LIlPj1xcl_3CW>6AiPq zK>IW{IF_X*8vZ&Gqjoe5Q~u$csmB$$f(=|wH@p-+OT$CC>ev zh!%F4NxCZpghBu{O5cC5Zy5so=VRay_I|WcCmw!x^Re>A3?#T0Gm8Uf@RtLyrx~4k z?m(GG(fHCakM&z}(`VGx{EohyT;7OIk?m6SgT)50ojn{RIAnPsxA-$}an;|`)B~7% za;f#L!V}2~J!ci-X!dYM#1&rwZYb*t2v9ZkUp>wk5>yjg%kk$>Ih+!{`;-tE{X2_y z$(RyBeqE!(E)bR)^4E8&z_w zAN(@#W}HBPUFux#lCmHDGH^I)J0GI8ONNe@XqEbTe;tuRPK0nvw(E+#vJ0bw;kZDN zV+DadWC1hZJ5ae@@0$+>R1rUtL-bZzpV#pNW2qP^R>P_j-O7dwSspq&>sMZ46v~ph zA?U6jeqO%J+3WjTOl1(5zpQokk+o}LZi;b}RZBi>5M?w~1mz0wThp+rhA*bG-NkrU z5QsX|HZY=twl{8TjWJvo{%sve@83wx{ILRy_kiB!5{&hL5w`WU9_E1xaPro}1)>j-Xb<*qxt34oky*dZ-R z==eNO@S8l!wDCHt2pqaXnXwXkqA~KQYIqgX1&B_@?AO&C4|M*tsh=6_(n8Ayzogz+ z@iaUX4^n)re)Jgi3GuWcA4O@vb$|A2|3JY9Z+(-Ht8qjvoO?ZC|p! 
zBvC|S(DQ!RV3WV^{_=P1@LGx9A8cdR-fEMrmQ-9GeieWf0xpdP9ROY{aPw9$`y~hf zp2vKy>GG(dqTS|C31pkhg#$FI;9~XL(OgYYfN^usE>O!&QZg1Cq^q-Ar+atmC(vnh z^u7EuzwME*p6&Yhz#p{Nh6l~x?`X|=^TsFN_1G1Fu#A)Z8SPMVCJ)T@#^HwJ0i!;Y z9B$w0n~#hfbW4VnSo}X(6RC-1<4b?i)ITx;C^f&RNXN?RGmRunV{NptNCX}|gc2S> z%8@m0NRU?fqjWvcqsC|yQ%>xdX^wh|_yjo_edP@VY0T37fAO5JGD>X$ zID3h3;U01L{d?cN0uF}DR{faq?ct~&1W8@=GaIP$a6kjl{YC0c#PC#uB;e`3t6T!i zq#och-}9TM#vkB8wN%TkY)`b?Giv|=EQ)Wef1mo+y*RaZ97&2Q)oDYX$$PQOo{rdw zz1VdzPLx#q3kdqv@-1oR4b!yp0d~cSo8iXudeeN*vbIqcj7a$4 zxt11P-sJOLeLjqGdwUpZ&OW~M5n)qljH}nI+3DE|rh8BNk?8Y}_df_u@4n&JzZXdi zQ9cMtB?ZLs@xMq}%C>z7`XKwiP_%@XXwUGWae1>?b|g(Z;BC0qlpv_^UmRpu5vLa62NBa;69;*7E?4F9&xiJzX4LDJmN*n4>H8t(i)z*82^H$N4YR4>Kqh(4 z=SY>WNF^=V%xBm`(56{C8(#GzUzqoUgBs#$saCF%391#CChx=pDG1_Fi$}mHI7B)F+Zv-;!rvfZ){S|I7>0Q+OGCpH4}oKfc<*pRPyzgyJ@x5 zY+*AXLh%biz9x&g30Z%qgU>WcEP5JNJB{ROW(s+2Og&Ket&x{)+wk^po_^u|NL~!k zIlH~(0iU^WZuf4^gj}5hUCdS+1*b}ZUdgDM=yX=J)Vcnn_q+pRrida2=Y|36n$-8U zq;331j8GT1t9sPP*jT~O44&eej3z)e?#Hcij<7s$r`lIy^3Xn`QevcPEWtgOdwr?w zy7h20;&eW%TF{?&0NH)ufx&XkJ*2Hr2{>VdwnSakHyEFonwB`5%JTr5m6I7Q`Sj#) zQ=?8To=dILrk|M!yx03JZ|(R%;E@GooW%QXnYeI1qxT!n2D%QMD6aK#SjV3pi9xfb z3&~Su0bAC22#bl*Q_^uXJ|Fn0h&(Hs6E?*k-Pl9AIuXSz{fvxvB zSkII2J;?p^N!nqwa6pAS#S+H5a6OFxI+pLd+asuTBhDSrmMh&X z5$|$zsq9Om3m<-QB?#Z-#j%C0uP)a8>M&i+h~V$F=#swAYlsPEPC5RB^J^IysmQjA zysp<{4D(zKDogciSJirxkHfxgq~^0n^Ojz@NqOPq06nr0UTr^|%iy6~A_xz?dVk>_ zt5n=#x!mg7nvPCY_IbM0_aBury_Ixqlzdm6t{0VAC<(fnJ=PhakL?LOeba9hIy^SE zmh6TZ)q8NvJ0cOIPOX8Y!CL>5qb-yvoiu87<(4uJJ_c zYhEqojq3qPMvB=<>WN?b1P#B?iS~cwhy4HlM5CG`@vP2toqG&N(RAs=GScJy=%OgvR)$4-;=kW zRTfdTl?-ZW=q|TW{M4A_o7MY=?NN~0)Y`0n1pX48wN3_O0;v;kx~@%NzY1dYo~~cSVoew2PmkqELsNk{UTZZE+c>q;=6Wx-w-8 z++5tEghz;*|JpqBkALW#wqT2eJ?z9$(E}(P;sY!W4!8|8sbe zLcX3cNSqH(y|G0}VV*mv5K}bZNH@```^Ya_zmaf54iy}Smsv9}=FMats?W}bdf1JX z#Xr3{aOes6RvZciNXvfq(~ZajKyS}2OXlsUaKT~S!VdYmDsg-dV>ITf948)*85zcj z6puH>E#BsyTA@9B3z!GriX&_)1gc3se zAm)vDWI8r^IcXoj@sZ3@Wx)I9Vv0J5B{+AQu_qK~Rs)Z4JV0mJ#lZeyn^h)}@kUpV=UES;$wlw=>ZsRrvt+wQCVAfTH=Wyc z&+2+aW3h0)+s`jD=`5+KpY_~fMgHIXfCus!9NK0 z&|`CW=T$U8z%HO#3w=nqZ&YUHJtS20#^Iv z!>VW+d0t7$SZCT<#m|SE%%&o!_e4)oWKRS;_S3eKpQ!<9+kzBr{;SCX2~9fObOV_WKoL0b!E-#mqC7U zv1gTrnOS;{x@d1m3ZiLpVC{*%el90}$#*$<;?F1K;>S*(Q6edhlj>FQ{3bTlg?z`I z&>?udM2;#(=(q_t>ja}IC4h8?02QvZ8LcYTT(h@ti{(1$4Go}8{d$+iWE&U-pV{;J zx0NGR4iqZHp`84@8zo+%BM;;1yria;Pr8ltr%Wl%KFQ*U+-Ipz=8|8ukg;_>^9NQe zGFvX0hJnjXhxd-LvXWDlRkx$k}JM(o45F-;d9v;&sfMXgUcqKAgJob8jjl)Xv@|@yk#R)E6EO-704~~ak(L%JjlgvKmJD*!O7=DB!a7Mm~-V^U@buWIi#D%y&G1~30EMoJ~ z#}t$7mWrFp&X~!7@Ys9#{`}hY9iA&rn>ik@0gn=SBv#rbb1$_)6#9i)pv&x|SpdWTs+I zuK0__`A2h29ZX-d%12hsQQ7Zcsl9|BC- zI@=U}MgXHWQi2Us^cGJ@3pGkFRi00+=duS}XB>H7ghCWsRIlXYSn;9RDUH><6u zMu9x%n_}@mQcCtoTl+hlW-KJH9A!~?n*hb>x_zOAhb`?y>NLRT8&AHHk%GSD0?G6h&=|wle*asY@AOZH>mu zlVo1L`3{01?5B(KovWC&;)iu=8DazO#Y_SIm2;BEfAtxBU0(30`PEVOwERVEF`bKM ztB6z0b4T5~kjVhZZw#z-$j_jFh35KBh09~14{sWya9PA78@Y-%XZ#T+gA!h9!@x>h z=98vz24080(CU|np0Vph%S(rQQwEdIm|lbKBI4|A|5Egb!_QhvS~%m2%gu}-q+9z{ zaWj`Ti|qh1lDiVCq}XrJ?6bV}mNro3ckA4_k2K=P(C%*>sfuGN0l`>+^Xo^yl$PTn zPL@AgMKCZ*tjnX2=nU*kabpF0MEvs|aYlr!(jR+XwG(-P*h{Sj9*0Tw4ceqo-bvJ| za~BPWEvA8MjDS$1&vu5tAfTWh`I*_oYYl{Ykw}}t78$v{__X9g+Ei0Yrt6I2h4Kj| zA~#o9m_h#HNriQ zzY?XpP(EWc=RO2QBzsPlU?K9vy&6@Vd2&=Y^YF2^$zbNw*>dSCmFo2rW*d^9sw%WI?_@$1Av}pwnV;jh}t*W6q&yfxOrqcfez)0);hgHr9TmW3; zU+4whNKZ80WeKK;Am;NUQ_N{j^h3$tUZ{J?EgC$GfNWeRe=S@(c z`1#;JoL}ktJN{oD#z_CT1Hcz<0v%CIMHN0NxDIRyc(n<)6PN%R=U3S;^Lh`sKY&qI zf23!sOJyb`ib_2NHvb!dZUXDTJ9r|tYex%B@}ea%(!HdK`Gt=E1@dw7@^a)Mk-DH< zaI)MgX|%}K5I)JAQ|*xN#wbq7IPH7Q{sdXHI$2adSq)QPN_LA9_gJ-r75)ITo_LZ(R`F-`*q+B|2I!x`wj+q}G$gI;4! 
zA&F<>?7El>r_2A!Ws=N_p7Gbq&bZe+2|Wj-^5YnS6Dbp(M(khjn`*i|Zw8hCKFrc~ zdl#X3;+*d>D;Mx0_;-uaqY(_&BqtZTCvApjY4%$Vo4yOfMjSK?wt3zZu*7C}juf0p z37^DsOUD3J`h}Yj-s^6TXnV#KNzJ@`4nG;cHQ)J<3{ zy8skBwVY2I?oaH5`)t%IUviPU7Z(TUVkN8$IYp%_K% z#H!Q)*0=i{p}kUi$gDZCDthZSBzKcHkTLhg9)5=K%o*3e@52g*Ymw?=XzoPUU#ZXoF&`IW^w=5x5h&hLt%IwaqR z_1Y!!&m?E85g(4i=big}XQb&$vqWPorh)M*=j2}=>>*w;t&1E|6>fH(l@1I~YL)q8JO1$MF_A+*T5@`_}k$cjKzP>rOvB)Iv=% zN1A;7C@Em8+qBA3!RjZ5uoc+ou8MlDnRM67@-m$-4UOj#@--5sz zaARFc0uFO(y;mq8AQ9AKCT{OG>ijDu5q58zCYt+gQQ>`1x0PlrdReYmk^i81mw2DY zF?~j&<=&dKB$&Cr>+kCdPmh*tgr3b(-@c!z9g1AGEkqy9u}!b!Ok86sgNxa9TF!K@ z`_9ko7bF#reLmwC`6&`OtUt)G6?j7rYvdcXS|o=$sA77dQ!JG4n=@4n;t`6KIAvra`5h4Kt+ z*z*KDKIYNf@R2iBPjNJJnYYBVOx+;T2lu9LmfSNkZGc?6VUr|yrSnup$fwOOTQ z4+YIys&Rx4gUeSPjLlkrc;&tU7&f`_UG>V$Bj-_~rNb&F(m*=k?>eU+oRc>m8U!V} zGdjYqY1}W}1mWq59%h~en|oFLK6p1Y2P%vuw|=xmuO5$$u9iCeYGPXo(imM^Q=@*m zGsotSJ(;OyA`c`4L#Dl0S!`bYY1cd!yQB(sj+z+RkYzzWY~N==0j4EQ3WkjKtMRUQ zeTliyf-fPywoZGqS{KSab!!f@+ym|sOqX*3>_M6v9{fB-%WWr>oBIUYk5lzB6 za>OY)YsF0Vpnj0NRGMa1E+P#ZF^SMBB4FVwXL`6XjL59ha~?>US9+`sNw$U&%~}iv z9LAU!oXv>uRo+F)-Cb=-Wt!ao27g-W5;V!kka~-Sl>|3&-}8{KnCZ8q!f2+qJuf%0 zY4-lkyMA{9&iRpYcrkcD522s_hntw=t+l;2GUmte*`4F$+!n+l&AG@>rvY}sZ}3oj zRym;B(ti6RdUbm{Jo{(yh+?iz_bR3+hZ6^zmE>ZnQKrw;<6wa_z-|QIsx>P-f3B>= zG}+XQe$04cGve%iCQRTl5+j2flM{>?7~nSJUZjt07O&h@Mx|%%^#<|l9In1lL(G~` z??0FZr*r93DTKeOfiTHcVoobTtSt8SgtHvYv7^sX(@!gHgPvA7+QAs1*WeKGke<`x zk=IY0#W4+bn4eU=fJF6}Whrn!>f;2eA_a1dfo5O@E$E2~r34d6W$YllkQ_cT;dvi} zX;TnSf0ht{tv|snlo-&EBsHK0SEuRV&C`G;`IG74`g8s3i=N3hK6s%G}dt`ri=cEqA?i2qFuO~Q8EO#gKwX$MX0*V2rQO4F{up7X znHS8|Z;M4PDxxH}&eMi#pOGmLQObf%=;}paE_%Srv`%JHY_b~99I#pmDaRIS^_x!Y zXuK_2P_?ccXWbhhi|~0KLe8HY^PJ&CcPf4Gd8WtnZK4b81hd?r7$W1gUPHQyRzQKZ zKTAxLSMK~-(>Wr|e(X{6;1qY-4~+ox^B0$)a~U(dPeDvDFZ&;#c!uYa>)n?%(L+wn z2)&}Fr`RyyU6n(bJoPsS2xslU19uOVn_9cS9d@A>@RX& zqlP%{nhz+0r0DOyl!C-WdXRc1_SsubH*0&nlI+%3#`I8q>OXovwOmGkKs*d#A|H~l z!aCi!=i5c)!oDoVMnxJyr=eH*+gQ;U3Yf*@L5p4w`G5iS0aE%YK|`;d+0jd-&5oXOhV)8SS$3nh{Sc zIF+4E7G_>%)kAZcUI@3BAF?&l)LW%T!K<=YhH}@oZ@ zeN;32JUsDP_v+0Sezt~?dGy?=UVd-rG@w9hYzbq0C4aavJ(8`)5J@lm5aiq|)g_ND zQ8#W}bR~2oF^?kos@-*`dMbnEbe6bo@jWPM4yNSc=O6po=&NI50?Mu1Zz#2p1;??2 zjPer$gS^&O*%Ysz-W-?oMV+SZry$OcLDZ)W1P1NG>Ppzac~gw7l%0mxl7OEK6)a{V z9~GEzET%;0)E*q-I7@71DA4(+i)xENYAH-QQol6j@+lWD+3y$NxB})o8L7gk$Y{!g zOhc@`Frq*+9;x3XGtsza!E9Y*zsKdHq#qw~Djj*^-bN`TV~B^Cw#HCHFLe zt_6=U#Mz5G6pIwPbWTE|jH=GeK z%NnNiBER&-DMQWq!gf$JFUfXNI z*RuNM&%`H0B(1sqxqXW#+SjuY+~^n2YfyPN;UBBG|lCL|82375_%RlA0 z~eo=#Mqp~N*;C{83jQjAh_BHgO z_be5=VlL=MKukWIp%4K~hIY1CFypLay{dWHS&Bv#90}*ZY>U2&aQtQyRVr>yWzc1P zetIm1?(C4!cM83B)_34mI^9g`zB;V3yVS4e?Oki)ix$?n^CAA0mKd9(SW9=98uHBd z7?xn?n0L^lkaYaMgwd?1wC2}q26u2#2BgMKI#9^FQ-9@S-C20~^=j2?{F`LfW*j;q z8FD}dedRTS(+`lowudFZwTfr&!m}T4PFe|fZ#Yfq;Sv$m>TyEt#ThM;IB4AfW=!nX zZPz;i(z249|?FtF^V=Pp`OncpELKL`Fs2&k+g3jT)nkWAiKJHWk*?v;5v+x2RmY^!^PU5_~e^%n>UjdRJ!RBnaR zT5!_)sB*{qLxi+o`jl_-Vk~#O1H>*8mU8ROG&@dmaP${W(^aqO$V;NK^@+)rV>Epd?jNlJ>) zy*+rh2k-B#FOGO4U$DsO%Nqd88KYD2f@vCt&nb`Ete>rzKO8fylT4Oe0%zjSG>9fv z_KTiA_;N_6PULx;gCSh9WrBvMEc51si79J5Sy>L^?y6i;uKs!?^#wkNxfy8u@Uw zOX(fRSiWM_6SoReF8}+}+~rIN^eZKh%i(D1lad^^}hH1^hV)rVP?-#PTr&t>J&J1YaF*ovR#Dk2rV3=r4GlS~wvi{lD88 zmBBe3+Th=vFkVAS<@Zw_!If1S$S&ztnPs#mvYKi!e60&~JBj}#9rEr@t>UupDGN2+z|YIDFd?85um;t&h!;k^H{&Al^|Ob&mIS3$nF=JCPxe^*KRw$L?J6XMO=e0;ip%Jc7mfaHq}7Cgg# z1Hr>rT(M0T#lMQXUejSoG^x|=7IIT{UZBi0 zU*w+ucd~nD-|xd2pjK9wHGK~8{ciNi<8CCyZnWHwB~3T~Z$>{%rJyj9sW768peWE6 zUr+zTthGXeW!uu9DlrdKZ;W5vK^%IXtJqzO(J2Azagxa4z&_yVG!s9568{yX?zv0! 
z^3MjprsYyVTXijJAq!f7LRv)*r60J%6Ju12)KTA*W}HlHxKWCv-IgJlNlOX0`?S$q zCkSXOsU=$95;IQ?vyZhuJ9md$4mxw4WJL(0B$GR0S45oBUZMg7Cqz*GTJW@92O`LX zW|=C&uh6XjI=K7m$6Xqt(Ddfbk(nny@*=o#K-{qjU>)Na3V5m|ajT&l-tBU14*Y5ANV_2CahF-|A zYKW@2-zc%Ho-TI$U~ejV)-J`xY2x`HUb4LQ_v@jid(-^!BFP=Ve)EH%J&yyU`NsIe z%@K-(B$i5TREO02UJt>06WjxCYGbEt%;=N$M;j_)vH$^BI*sDa!neykc&)0kYFfT&cBG|BQU7E$7xLtPwP zz6ucv?>fHGZuQ0OREpj%WvB0*^jKA!+vjut4F z1tP!=BB@EMAx^ugn&;v>UF^8Xm|>bA2p89a#7cfc!<6Su19m0o_oBFxehTR=ety0F zR0(0rE=u@R&4BgIz)kR}391}5!D9kVZRG`$mMp(Tk^*)bs(P=@(VFM;?%!_Z*tUmJ ziOAvk*@0`z064~f>**P3@ODG}qEm{3%H#VPe}%sk1sA1@-@GAZa{GRlUd%NM={(~E z<7lA;pT)84N~<3%qBIp06j&oj0i3Y>rU;_>Foi-8IM{;8*DYs62!UAv8ZlKEaqic+ zH~8o_m~6c#+A%a<6wK7yf{_ zdRqESCie|)lq?GXT7WW+0*o#MwkDEM>@>n4G zbOQh%4O$wie^UlKmZ)@SRQ2OM1u)SC-xx>?e@i`TYf4+04HRaM&VG5RV`TI_Sd1hE z#Lsy(HTo}!DUhiG4R08O1a_ZBZG8<>;HCKvpr2h?f!~dPpx<3{st6)C$_p|^>$gq2 zs7MI-mq=zJ_2o;Skq6&ijxb&%1GsS!ha0IPNI>r&udM#=ZF?HW?g#tRd)CX$_)qUG znA9T43~*KEUl@p%k%*9n*L2F z;MN_(6Y#X_LN}mScx3viz}1+rufbvhz0qV7tYWT&s$`F%NYH=p8uGL*=5jLFHAAy+ zRM%+kf;oQN+LaY?31tjYeRW6av3`4AVd3ypt(%UP7Ra(3Nk=nF2sw@mq`38oA?0LJ hW7uaE>D=0}6q~1ZP+8WkBVfWH6$N$qV%cZG{|_Bf*>(T` literal 0 HcmV?d00001 diff --git a/content/en/docs/Compilationguide/hardware-requirements.md b/content/en/docs/Compilationguide/hardware-requirements.md new file mode 100644 index 000000000..f8842cde4 --- /dev/null +++ b/content/en/docs/Compilationguide/hardware-requirements.md @@ -0,0 +1,11 @@ +# Hardware Requirements + +Hardware requirements for compiling the openGauss are as follows: + +- Number of servers: 1 +- Hardware specifications: + - CPU: 4U + - Memory: 8 GB + - Free disk space: 100 GB \(Linux 64-bit\) + + diff --git a/content/en/docs/Compilationguide/how-do-i-delete-temporary-files-generated-during-compilation.md b/content/en/docs/Compilationguide/how-do-i-delete-temporary-files-generated-during-compilation.md new file mode 100644 index 000000000..bcceaee4c --- /dev/null +++ b/content/en/docs/Compilationguide/how-do-i-delete-temporary-files-generated-during-compilation.md @@ -0,0 +1,23 @@ +# How Do I Delete Temporary Files Generated During Compilation? + +## Question + +How do I delete temporary files generated during compilation? + +## Answer + +Go to the **/sda/openGauss-server** directory and run the following command to delete temporary files generated during compilation. + +- Delete the files generated by **configure** and **make**. + + ``` + make distclean -sj + ``` + +- Delete the files generated by **make**. + + ``` + make clean -sj + ``` + + diff --git a/content/en/docs/Compilationguide/how-do-i-handle-the-g++-fatal-error-killed-signal-terminated-program-cclplus-error.md b/content/en/docs/Compilationguide/how-do-i-handle-the-g++-fatal-error-killed-signal-terminated-program-cclplus-error.md new file mode 100644 index 000000000..ccbf72734 --- /dev/null +++ b/content/en/docs/Compilationguide/how-do-i-handle-the-g++-fatal-error-killed-signal-terminated-program-cclplus-error.md @@ -0,0 +1,12 @@ +# How Do I Handle the "g++: fatal error: Killed signal terminated program cclplus" Error? + +## Question + +How do I resolve the "g++: fatal error: Killed signal terminated program cclplus" error that occurs during compilation. + +## Answer + +Error cause: The **-sj** parameter is added to the compilation process in the script. The number of concurrent tasks is too large. + +Solution: Reduce the number of concurrent **make** tasks during compilation or run the **make** command directly. If the one-click script is used, you need to modify the script. 
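+For example, the degree of parallelism can be capped explicitly instead of relying on the unbounded -sj used in the script; the job count of 2 below is an arbitrary value to be tuned to the available memory:
+
+```
+make -sj2    # limit compilation to two parallel jobs
+# or fall back to a fully serial build
+make
+```
+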
+ diff --git a/content/en/docs/Compilationguide/how-do-i-handle-the-out-of-memory-allocating-xxx-bytes-after-a-total-of-xxx-bytes-error.md b/content/en/docs/Compilationguide/how-do-i-handle-the-out-of-memory-allocating-xxx-bytes-after-a-total-of-xxx-bytes-error.md new file mode 100644 index 000000000..3c9f0f4ea --- /dev/null +++ b/content/en/docs/Compilationguide/how-do-i-handle-the-out-of-memory-allocating-xxx-bytes-after-a-total-of-xxx-bytes-error.md @@ -0,0 +1,12 @@ +# How Do I Handle the "out of memory allocating xxx bytes after a total of xxx bytes" Error? + +## Question + +How do I resolve the "out of memory allocating xxx bytes after a total of xxx bytes" error that occurs during compilation. + +## Answer + +Error cause: The **-sj** parameter is added to the compilation process in the script. In addition, the machine configuration is low, the memory is insufficient, and the number of concurrent tasks is too large. As a result, an error is reported. + +Solution: Reduce the number of concurrent **make** tasks during compilation or run the **make** command directly. If the one-click script is used, you need to modify the script. + diff --git a/content/en/docs/Compilationguide/how-do-i-resolve-the-configure-error-c-compiler-cannot-create-executables-error.md b/content/en/docs/Compilationguide/how-do-i-resolve-the-configure-error-c-compiler-cannot-create-executables-error.md new file mode 100644 index 000000000..f13396615 --- /dev/null +++ b/content/en/docs/Compilationguide/how-do-i-resolve-the-configure-error-c-compiler-cannot-create-executables-error.md @@ -0,0 +1,12 @@ +# How Do I Resolve the "Configure error: C compiler cannot create executables" Error? + +## Question + +How do I resolve the "Configure error: C compiler cannot create executables" error reported during version compilation? + +## Answer + +Error cause: The **binarylibs** file is incomplete or damaged. + +Solution: If **binarylibs** is built using open-source software, rebuild the open-source third-party software. If **binarylibs** is downloaded from code, download the code again. Then run the current script or command again. + diff --git a/content/en/docs/Compilationguide/introduction-to-build-sh.md b/content/en/docs/Compilationguide/introduction-to-build-sh.md new file mode 100644 index 000000000..3f69d58bd --- /dev/null +++ b/content/en/docs/Compilationguide/introduction-to-build-sh.md @@ -0,0 +1,75 @@ +# Introduction to build.sh + +**openGauss-server/build.sh** is an important script tool during compilation. It integrates software installation and compilation and product installation package compilation functions to quickly compile and package code. + +The following table describes the parameters. + +**Table 1** build.sh parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Option | Default Value | Parameter | Description |
+| --- | --- | --- | --- |
+| -h | Do not use this option. | - | Help menu. |
+| -m | release | [debug \| release \| memcheck] | Selects the target version. |
+| -3rd | ${Code directory}/binarylibs | [binarylibs path] | Specifies the path of binarylibs. The path must be an absolute path. |
+| -pkg | Do not use this option. | - | Compresses the code compilation result into an installation package. |
+| --no_om_adapt | Do not use this option. | - | The installation package does not adapt to the OM and takes effect only when the -pkg option is added. |
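+For instance, a debug build that uses an explicit binarylibs path and also produces an installation package can be requested as follows (the binarylibs path is a placeholder):
+
+```
+sh build.sh -m debug -3rd /sdc/binarylibs -pkg
+```
+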
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>1. **-m \[debug | release | memcheck\]** indicates that three target versions can be selected: +> - **release**: indicates that the binary program of the release version is generated. During compilation of this version, the GCC high-level optimization option is configured to remove the kernel debugging code. This option is usually used in the generation environment or performance test environment. +> - **debug**: indicates that a binary program of the debug version is generated. During compilation of this version, the kernel code debugging function is added, which is usually used in the development self-test environment. +> - **memcheck**: indicates that a binary program of the memcheck version is generated. During compilation of this version, the ASAN function is added based on the debug version to locate memory problems. +>2. **-3rd \[binarylibs path\]** is the path of **binarylibs**. By default, **binarylibs** exists in the current code folder. If **binarylibs** is moved to **openGauss-server** or a soft link to **binarylibs** is created in **openGauss-server**, you do not need to specify the parameter. However, if you do so, please note that the file is easy to be deleted by the **git clean** command. +>3. Each option in this script has a default value. The number of options is small and the dependency is simple. Therefore, this script is easy to use. If the required value is different from the default value, set this parameter based on the actual requirements. + diff --git a/content/en/docs/Compilationguide/introduction.md b/content/en/docs/Compilationguide/introduction.md new file mode 100644 index 000000000..029f87b79 --- /dev/null +++ b/content/en/docs/Compilationguide/introduction.md @@ -0,0 +1,7 @@ +# Introduction + +- **[Purpose](purpose.md)** + +- **[Overview](overview.md)** + + diff --git a/content/en/docs/Compilationguide/os-requirements.md b/content/en/docs/Compilationguide/os-requirements.md new file mode 100644 index 000000000..41f75c169 --- /dev/null +++ b/content/en/docs/Compilationguide/os-requirements.md @@ -0,0 +1,7 @@ +# OS Requirements + +The following OSs are supported: + +- CentOS 7.6 \(x86 architecture\) +- openEuler-20.03-LTS \(aarch64 architecture\) + diff --git a/content/en/docs/Compilationguide/overview.md b/content/en/docs/Compilationguide/overview.md new file mode 100644 index 000000000..a2c51b116 --- /dev/null +++ b/content/en/docs/Compilationguide/overview.md @@ -0,0 +1,4 @@ +# Overview + +This document describes the operating system \(OS\) requirements, compilation environment requirements, software dependencies, compilation methods, and compilation result storage paths of the openGauss. 
+ diff --git a/content/en/docs/Compilationguide/preparation-before-compiling.md b/content/en/docs/Compilationguide/preparation-before-compiling.md new file mode 100644 index 000000000..574998d71 --- /dev/null +++ b/content/en/docs/Compilationguide/preparation-before-compiling.md @@ -0,0 +1,9 @@ +# Preparation Before Compiling + +- **[Downloading Code](downloading-code.md)** + +- **[Compiling Open-source Software](compiling-open-source-software.md)** + +- **[Introduction to build.sh](introduction-to-build-sh.md)** + + diff --git a/content/en/docs/Compilationguide/public_sys-resources/icon-caution.gif b/content/en/docs/Compilationguide/public_sys-resources/icon-caution.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Compilationguide/public_sys-resources/icon-danger.gif b/content/en/docs/Compilationguide/public_sys-resources/icon-danger.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Compilationguide/public_sys-resources/icon-note.gif b/content/en/docs/Compilationguide/public_sys-resources/icon-note.gif new file mode 100644 index 0000000000000000000000000000000000000000..6314297e45c1de184204098efd4814d6dc8b1cda GIT binary patch literal 394 zcmZ?wbhEHblx7fPSjxcg=ii?@_wH=jwxy=7CMGH-B`L+l$wfv=#>UF#$gv|VY%C^b zCQFtrnKN(Bo_%|sJbO}7RAORe!otL&qo<>yq_Sq+8Xqqo5h0P3w3Lvb5E(g{p01vl zxR@)KuDH0l^z`+-dH3eaw=XqSH7aTIx{kzVBN;X&hha0dQSgWuiw0NWUvMRmkD|> literal 0 HcmV?d00001 diff --git a/content/en/docs/Compilationguide/public_sys-resources/icon-notice.gif b/content/en/docs/Compilationguide/public_sys-resources/icon-notice.gif new file mode 100644 index 0000000000000000000000000000000000000000..86024f61b691400bea99e5b1f506d9d9aef36e27 GIT binary patch literal 406 zcmV;H0crk6Nk%w1VIu$@0J8u9|NsB@_xJDb@8;&_*4Ea}&d#;9wWXz{jEszHYim+c zQaU<1At50E0000000000A^8Le000gEEC2ui03!e%000R7038S%NU)&51O^i-Tu6`s z0)`MFE@;3YqD6xSC^kTNu_J>91{PH8XfZ(p1pp2-SU@u3#{mEUC}_}tg3+I#{z}{Ok@D_ZUDg- zt0stin4;pC8M{WLSlRH*1pzqEw1}3oOskyNN?j;7HD{BBZ*OEcv4HK!6Bk6beR+04 z&8}k>SkTusVTDmkyOz#5fCA$JTPGJVQvr3uZ?QzzPQFvD0rGf_PdrcF`pMs}p^BcF zKtKTd`0wipR%nKN&Wj+V}pX;WC3SdJV!a_8Qi zE7z`U*|Y^H0^}fB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Compilationguide/purpose.md b/content/en/docs/Compilationguide/purpose.md new file mode 100644 index 000000000..fa5de2504 --- 
/dev/null +++ b/content/en/docs/Compilationguide/purpose.md @@ -0,0 +1,4 @@ +# Purpose + +This document helps you quickly understand the software and hardware requirements, environment configuration, and how to compile software or installation packages from the source code for compiling the openGauss. + diff --git a/content/en/docs/Compilationguide/setting-up-the-compilation-environment.md b/content/en/docs/Compilationguide/setting-up-the-compilation-environment.md new file mode 100644 index 000000000..cda27d365 --- /dev/null +++ b/content/en/docs/Compilationguide/setting-up-the-compilation-environment.md @@ -0,0 +1,9 @@ +# Setting up the Compilation Environment + +- **[Hardware Requirements](hardware-requirements.md)** + +- **[Software Requirements](software-requirements.md)** + +- **[Configuring Environment Variables](configuring-environment-variables.md)** + + diff --git a/content/en/docs/Compilationguide/software-compilation-and-installation.md b/content/en/docs/Compilationguide/software-compilation-and-installation.md new file mode 100644 index 000000000..8ff29e571 --- /dev/null +++ b/content/en/docs/Compilationguide/software-compilation-and-installation.md @@ -0,0 +1,120 @@ +# Software Compilation and Installation + +Software compilation and installation are to compile code to generate software and install the software on a computer. The one-click compilation script **build.sh** is provided. You can also manually configure environment variables. The two methods are described below in this section. + +## Prerequisites + +- The software and hardware have been prepared based on the requirements for setting up the compilation environment, and the code has been downloaded by referring to [Downloading Code](downloading-code.md). +- The open-source software has been compiled and built. For details, see [Compiling Open-source Software](compiling-open-source-software.md). +- You are familiar with the parameter options and functions of the **[build.sh](introduction-to-build-sh.md)** script. +- The code environment is clean, and no file is generated before the current compilation. For details, see [FAQ 4.1](how-do-i-delete-temporary-files-generated-during-compilation.md). + +## Compilation Using the One-click Script + +1. Run the following command to go to the directory where the software code compilation script is stored: + + ``` + [user@linux sda]$ cd /sda/openGauss-server + ``` + +2. Run the following command to compile openGauss: + + ``` + [user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] + ``` + + For example: + + ``` + sh build.sh # Compile and install openGauss of the release version. binarylibs or its soft link must exist in the code directory. Otherwise, the operation fails. + sh build.sh -m debug -3rd /sdc/binarylibs # Compile and install openGauss of the debug version. + ``` + +3. If the following information is displayed, the compilation is successful: + + ``` + make compile sucessfully! + ``` + + - The software installation path after compilation is **/sda/openGauss-server/dest**. + - The compiled binary files are stored in **/sda/openGauss-server/dest/bin**. + - Compilation log: make\_compile.log + + +## Manual Compilation + +1. Run the following command to go to the software code directory: + + ``` + [user@linux sda]$ cd /sda/openGauss-server + ``` + +2. 
+
+    ```
+    [user@linux openGauss-server]$ sh src/get_PlatForm_str.sh
+    ```
+
+    >![](public_sys-resources/icon-note.gif) **NOTE:**
+    >- The command output is the platform string of the current OS. openGauss supports centos7.6\_x86\_64 and openeuler\_aarch64.
+    >- If **Failed** or another value is displayed, openGauss does not support the current operating system.
+
+3. Configure the environment variables. Replace **\_\_\_\_** with the actual paths based on where you downloaded the code, and replace **\*\*\*** with the result obtained in [Step 2](#li1666842982511).
+
+    ```
+    export CODE_BASE=________       # Path of the openGauss-server directory
+    export BINARYLIBS=________      # Path of the binarylibs directory
+    export GAUSSHOME=$CODE_BASE/dest/
+    export GCC_PATH=$BINARYLIBS/buildtools/***/gcc8.2/
+    export CC=$GCC_PATH/gcc/bin/gcc
+    export CXX=$GCC_PATH/gcc/bin/g++
+    export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH
+    export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH
+    ```
+
+4. Select a version and configure it.
+
+    **debug** version:
+
+    ```
+    ./configure --gcc-version=8.2.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib
+    ```
+
+    **release** version:
+
+    ```
+    ./configure --gcc-version=8.2.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --without-readline --without-zlib
+    ```
+
+    **memcheck** version:
+
+    ```
+    ./configure --gcc-version=8.2.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib --enable-memory-check
+    ```
+
+    >![](public_sys-resources/icon-note.gif) **NOTE:**
+    >1. _\[debug | release | memcheck\]_ indicates that three target versions are available:
+    >    - **release**: generates the binary program of the release version. For this version, the GCC high-level optimization options are enabled and the kernel debugging code is removed. This version is usually used in the production environment or performance test environment.
+    >    - **debug**: generates the binary program of the debug version. For this version, the kernel code debugging functions are enabled. This version is usually used in the development self-test environment.
+    >    - **memcheck**: generates the binary program of the memcheck version. For this version, the ASAN function is enabled on top of the debug version to locate memory problems.
+    >2. On an ARM-based platform, **-D\_\_USE\_NUMA** needs to be added to **CFLAGS**.
+    >3. On the **ARMv8.1** platform or later \(for example, Kunpeng 920\), **-D\_\_ARM\_LSE** needs to be added to **CFLAGS**.
+    >4. If **binarylibs** is moved into **openGauss-server**, or a soft link to **binarylibs** is created in **openGauss-server**, you do not need to specify the **--3rd** parameter. However, note that the directory can then easily be removed by the **git clean** command.
+
+5. Run the following commands to compile openGauss:
+
+    ```
+    [user@linux openGauss-server]$ make -sj
+    [user@linux openGauss-server]$ make install -sj
+    ```
+
+6. If the following information is displayed, the compilation and installation are successful:
+
+    ```
+    openGauss installation complete.
+    ```
+
+    - The software installation path after compilation is _$GAUSSHOME_.
+    - The compiled binary files are stored in _$GAUSSHOME_**/bin**.
+
+
diff --git a/content/en/docs/Compilationguide/software-dependency-requirements.md b/content/en/docs/Compilationguide/software-dependency-requirements.md
new file mode 100644
index 000000000..284f64b39
--- /dev/null
+++ b/content/en/docs/Compilationguide/software-dependency-requirements.md
@@ -0,0 +1,53 @@
+# Software Dependency Requirements
+
+[Table 1](#table1212531681911) lists the software dependencies required for compiling openGauss.
+
+You are advised to use the default packages of the following dependencies provided in the installation CD-ROM or software source of the listed OSs. If a dependency is not available there, install the recommended version listed below.
+
+**Table 1**  Software dependency requirements
+
+| Software | Recommended Version |
+| -------- | ------------------- |
+| libaio-devel | 0.3.109-13 |
+| flex | 2.5.31 or later |
+| bison | 2.7-4 |
+| ncurses-devel | 5.9-13.20130511 |
+| glibc-devel | 2.17-111 |
+| patch | 2.7.1-10 |
+| lsb_release | 4.1 |
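+
+The package names above follow the CentOS convention and may differ slightly on other OSs. Before compiling, you may want to confirm that the dependencies are present; the following commands are a minimal sketch for an RPM-based system (the package names are assumptions, so adjust them to your distribution):
+
+```
+# Query the installed dependency packages and their versions.
+rpm -q libaio-devel flex bison ncurses-devel glibc-devel patch
+
+# flex must be 2.5.31 or later; print the installed versions to confirm.
+flex --version
+bison --version
+
+# lsb_release is typically provided by the redhat-lsb-core or lsb-release package.
+lsb_release -v
+```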
+
diff --git a/content/en/docs/Compilationguide/software-requirements.md b/content/en/docs/Compilationguide/software-requirements.md
new file mode 100644
index 000000000..c66c1c0f7
--- /dev/null
+++ b/content/en/docs/Compilationguide/software-requirements.md
@@ -0,0 +1,7 @@
+# Software Requirements
+
+- **[OS Requirements](os-requirements.md)**
+
+- **[Software Dependency Requirements](software-dependency-requirements.md)**
+
+
diff --git a/content/en/docs/Description/Description.md b/content/en/docs/Description/Description.md
new file mode 100644
index 000000000..4c8de0c5d
--- /dev/null
+++ b/content/en/docs/Description/Description.md
@@ -0,0 +1,4 @@
+# Product Description
+
+This document describes the openGauss database in terms of its product positioning, system architecture, application scenarios, operating environment, technical specifications, basic functions and features, and enterprise-level enhanced features.
+
diff --git a/content/en/docs/Description/application-scenarios.md b/content/en/docs/Description/application-scenarios.md
new file mode 100644
index 000000000..a8b126fd4
--- /dev/null
+++ b/content/en/docs/Description/application-scenarios.md
@@ -0,0 +1,11 @@
+# Application Scenarios
+
+- Transaction applications
+
+    Applications need to process a large volume of data with highly concurrent online transactions, for example, in e-commerce, finance, O2O, telecom customer relationship management \(CRM\), and billing systems.
+
+- IoT data
+
+    In IoT scenarios, such as industrial monitoring, remote control, smart cities, smart homes, and IoV, challenges come from a large number of sensors and monitoring devices, high sampling frequency, additional storage modes, and concurrent operation and analysis.
+
+
diff --git a/content/en/docs/Description/basic-features.md b/content/en/docs/Description/basic-features.md
new file mode 100644
index 000000000..ef011c424
--- /dev/null
+++ b/content/en/docs/Description/basic-features.md
@@ -0,0 +1,33 @@
+# Basic Features
+
+## Background
+
+openGauss is a standalone database. It has the basic features of relational databases as well as enhanced features.
+
+## Features
+
+- Standard SQL
+
+    Supports the SQL92, SQL99, SQL2003, and SQL2011 standards, GBK and UTF-8 character sets, SQL standard functions and analytic functions, and SQL Procedural Language.
+
+- Database storage management
+
+    Supports tablespaces where different tables can be stored in different locations.
+
+- Primary/standby deployment
+
+    Supports the ACID properties, single-node fault recovery, primary/standby data synchronization, and primary/standby switchover.
+
+- APIs
+
+    Supports standard JDBC 4.0 and ODBC 3.5.
+
+- Management tools
+
+    Provides installation and deployment tools, instance start and stop tools, and backup and restoration tools.
+
+- Security management
+
+    Supports SSL network connections, user permission management, password management, security auditing, and other functions to ensure data security at the management, application, system, and network layers.
+
+
diff --git a/content/en/docs/Description/data-partitioning.md b/content/en/docs/Description/data-partitioning.md
new file mode 100644
index 000000000..5dfc29ac2
--- /dev/null
+++ b/content/en/docs/Description/data-partitioning.md
@@ -0,0 +1,48 @@
+# Data Partitioning
+
+Most database products partition data. In openGauss, data is partitioned horizontally with a user-specified policy. This operation splits a table into multiple partitions that do not overlap.
+
+openGauss supports range partitioning. Records to be inserted into a table are divided into multiple ranges based on one or more columns, and a partition is created for each range to store the data. The partition ranges do not overlap. If you specify the **PARTITION** parameter in the **CREATE TABLE** statement, data in the table is partitioned.
+
+[Table 1](#en-us_topic_0237080621_en-us_topic_0231764089_en-us_topic_0059777656_t77b9e09809f742f1aaadea05d041bc23) uses an xDR scenario to describe the benefits of partitioning data by time range.
+
+**Table 1**  Partitioning benefits
+
+| Scenario | Benefits |
+| -------- | -------- |
+| Frequently accessed rows in a table are located in one or a few partitions. | Improves access performance by significantly reducing the search space. |
+| Most records of a partition need to be queried or updated. | Improves performance because only one partition rather than the whole table needs to be scanned. |
+| Records that need to be loaded or deleted in batches are located in one or a few partitions. | Improves processing performance because the related partitions can be read or deleted directly. Reduces defragmentation workloads because records can be deleted in batches. |
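+
+The following minimal sketch shows what such a range-partitioned table looks like in practice. The **gsql** invocation, database name, port, and table definition are illustrative assumptions rather than values taken from this document:
+
+```
+# Connect with the gsql client (database name and port are examples) and create a range-partitioned table.
+gsql -d postgres -p 5432 <<'EOF'
+-- One partition per quarter; the partition ranges must not overlap.
+CREATE TABLE sales_record
+(
+    record_id  INTEGER,
+    sale_time  DATE,
+    amount     NUMERIC(10,2)
+)
+PARTITION BY RANGE (sale_time)
+(
+    PARTITION p2020q1 VALUES LESS THAN ('2020-04-01'),
+    PARTITION p2020q2 VALUES LESS THAN ('2020-07-01'),
+    PARTITION pmax    VALUES LESS THAN (MAXVALUE)
+);
+
+-- A row is routed to the matching partition automatically on insertion.
+INSERT INTO sales_record VALUES (1, '2020-05-20', 99.90);
+EOF
+```
+
+Queries that filter on **sale_time** can then be answered by scanning only the relevant partitions, which is the partition pruning benefit summarized in Table 1.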
+ +Data partitioning provides the following benefits: + +- **Improves manageability:** Tables and indexes are divided into smaller and more manageable units. Maintenance can be performed on a specific part of a table. This helps data administrators easily manage data. +- **Improves deletion performance:** Delete an entire partition rather than delete data row by row. + + The **DROP TABLE** syntax can be used to delete both ordinary tables and partitioned tables. + +- **Improves query performance:** Restrict the volume of data to be checked or manipulated to facilitate query. + + With partition pruning, also known as partition elimination, openGauss filters out unexpected partitions and scans only the remaining partitions. Partition pruning greatly improves query performance. + +- **Partition-wise join**: Partitioning can also improve the performance of multi-table joins by using a technique known as partition-wise join. Partition-wise joins can be applied when two tables are joint and at least one of these tables is partitioned using a join key. Partition-wise joins break a large join into smaller joins of "identical" data sets. "Identical" here is defined as covering the same set of partitioning key values on both sides of the join, ensuring that only a join of these 'identical' data sets will produce a result without considering other data sets. + diff --git a/content/en/docs/Description/enhanced-features.md b/content/en/docs/Description/enhanced-features.md new file mode 100644 index 000000000..67e25d8a1 --- /dev/null +++ b/content/en/docs/Description/enhanced-features.md @@ -0,0 +1,17 @@ +# Enhanced Features + +- **[Data Partitioning](data-partitioning.md)** + +- **[Vectorized Executor and Hybrid Row-Column Storage Engine](vectorized-executor-and-hybrid-row-column-storage-engine.md)** + +- **[HA Transaction Processing](ha-transaction-processing.md)** + +- **[High Concurrency and High Performance](high-concurrency-and-high-performance.md)** + +- **[SQL Self-Diagnosis](sql-self-diagnosis.md)** + +- **[Memory Table](memory-table.md)** + +- **[Primary/Standby](primary-standby.md)** + + diff --git a/content/en/docs/Description/figures/opengauss-logical-components.png b/content/en/docs/Description/figures/opengauss-logical-components.png new file mode 100644 index 0000000000000000000000000000000000000000..3a488e0ac00ca1703c7e8204c649bb2d677bb11a GIT binary patch literal 30495 zcmd>mWmr{R*X~9@L=hyUq$C8S5l|YWTR=LcOHjH&k!}!>?hd6JM7kS9O1eSVbnP=2 zKJWLv@2NlM=eaJqwtKI&W{fe`m}B1ey;k5Wd5QaXiSNQ-u=`SyqDnB>t$6T9a_2Vq zrE#Q^8+@R@lhm+-!7y8)Kcr5ZJR=y40wyIY{MtEbd)Din@?`QKM7Yrz+G(zps1>fP zq%s|cUq?`2G#C2NlMlaRTd1>VD-;s(N4A9=X|LF2O`SR-wgq^3F{B(d%aZjq6V{}D z4WS6icz@zx`jCZ=DVp;gKPc|^PZUE&!?Wktk2gFKtZI^F)V(7y_Ds!s&xpU-?{Tl` z>Lg#(uBEtz2`l0ufl*c%$=XAo5$<=`!DkxxM_KUAEQ|sdd?Y-hse{1`I`vrzcWaiy z3X+nN_5viQ{`~pYZctjG*Wmo#F{Af3ETA#|nJL$=o#V@fh6ZIF9UXEkOw1aG8nl6;6p#)CL>#xIR?uT*Bm&UHTy1E&`$grWvMz@omiDE7P zeiL}X)x`;UfQkT0yik_?1PkDG7!Pd!VGM8ryKt!=UW%@9*?lRkb8QjI6@wmXyI%nQ^eK zt*yZ6)t*;s?0Ojt_QWqGg-iqVGUQM0`K83+7j)i_72C#r7Q_Jy$`_dTH)B)N-W<8a z5!E8~{hSGNTyR2KLxcOutkZllx6|&7C;=*rwWUbAR!Le~x@cu(CAdO1jsjQmmPz^9 z?vEcYJD%KviDnfQS-HBpo>OV&NJXGif9UJ$`|7!HPfyaG4F`vlGSN_n zePWoffY^yOod1XbsxB1o7iQMhv9t9~y{r9+*)=sk;jY(bYb%W>bI~OwC87ju6qQMY zsrna8iWlW2CGrIY1-CpqoI=#upx}wvKHi&WQQ+Gb{XSUrcKQh=<->;$%Zz*RHNNQ! 
z^9B1!QL1~Rx8Vpsfwc|gE9Wi;(xyZN1>JSo%+Fmup0*(fZeKiWsIAqw{^Ktw>EKYw zs8uDqxw)B@l|}u5=6HN$Bx;?K`XT?W0)H%e_lM~f*aHz>$L$B$k%-f9L?z6@ zN=h0Unli^7%|TnQ(^m3DU2#|dDwt(J`mbM_*pX~nw0n&wy4cwHB_(7>8ft2B+jEUt zU>aF9HR|BQXl)?bpn4iPC;PZ`YjZQ|{v$5ubI*b;zl_y|)Y#_LRgyoqG>^P8}RrDb+imFm-{ z9fF35v(|B`m&>$DYH9?3;67+rVVj-#DeKRzXVy31XFu+-bY;uL8BJHcwLSgw2Hc|q zc@@OO#76sz?Pc$m#LqSqyf&j_PngD)bLG2%Pj1DU&ek~`^gSCCZM>guHP@hFVPWyn zlg#X5Y{&U%Ty3Salk8qS$#BV<7lUT-6?e)G){#)Srr&-ny*lCzXL2;H!tJZ3qX|-! zrXtnCvB^nGf=5q-egqSIMZbQ^z@VI_;EzeF6ciNnIU|E|1!N)RFf#svipomg#_Ix= z!kb@*(nX|AO>;Bof<`7LavF|*SxPG^MwVHOKOMYy%GrFhHR&d>wY~jXK7}tI@8Lsw zPEJlFctL)?BGfDxK$x`5_|(*fi_LtKN6KMhLc-_GO$!B$%vnGEu`3N7ot<*>>0iJ2 z&eYV@!Zj6`$`N(R$;l}@x|J3&t6QN**Ul%V71QLBY3%v)8gg=>gKW!<{pn%#2?@js zXARXfKTFX(=)hDeLhj>w+Ag;Ts~xg@l=Yadwn6=DMOe2czp8=p-@3gyhoqJE z_8z`BeNYD9$&4LRFVT)`OHVrS-gSuM1v0v74oKb1zYjLK(0F%mc}#9rp)epH3U5(F z3&g`77{@5wbdGHAanoIqL>=<`Lm>qOt95_lTP{D zKh+)=j^^g(L`Sfg07+}>-7i~c^(#6W8XL;G#S@QuZ)_--!k$09s^B5`)5-B-r#jp< zTx&TR*Mt27wNPqbU@o|&1+EU2&kjcwJjjl@T@JEny$J_lQ*&1vACJ)Rh+JT%a}6#^ zDk>|u4S83W4REZ_s(BU2f%cDn43Ce~sFi3(%EU2u2jbE$4|-jf)2d_UvKW0U^Q&i} z(qQ>@wINry6P_g-xm{>h^>9DkAyu}c8^s%ij-Ea;DhfxV%qXx(r(V@@Z;nYp@Ty{a zwq9+bMCVbtyPcgKKNiZ{Ulmdy9k*Fm%YYaFrexN?7eB|r!O;c!vKx%Pi0(I<_mm|; z0Y;}_Gtdp&uY3#4g~m9kbG>{haO{NYc#r6=A1c? zT09z~{VgW$r=?kC6)&5}B^MMNO!NG?8c46XQUb1=Q`O|S_dM8;a@~);&O#gwWBD6)t{qFy2@~8)E zm$aCe@8{2-KaJC}sn?Dlyq&3096jn_pSvR3IS$X2ZE1RTBZ2S+a;5c)`4|}Bk5fEQjuP>z_QRE%_4$0a5NGmTbtwe?MIiFoL zhG-s!==0=a%}QyPoyy6NWQ7$*^6sE&A@q%vrKph98aDH2HOg?1d4hEf>OUbn5En4m z5D8eGVK5E}sP=$Ly(e`TFz-OQEj#p4X;9DP=F(n`DhS4~xG&rbWWJjw6Dd&BiY)kr z@z<|k%bVkcCQRgaa%n(`!OhK0Fpw&kx^4h!Cp!Pu$0p_6W!&$4C_sTHq^CFUpXzyT zo)L@$`;rz$!o6t!g2cg)g-8G@TwaibWfm6~lLj)Q!SoHvrmT+V=H{MP$ljDLo#Ev3 zZinl`O~Ai2b#yRqbd$Zg#*vW`1K#(GZSR>zjXTIGq>9u_7fV3tbmE1$n9rrDV+3Xr&|!B9Kub+0jeAGgIXNY6@c!o~os?MCamZ5Zyz^%R4C0yl z!-&g4glunbuZ;bB7>uXPX-{{m&SC4Fu&}U7(JfhQFimqNy{1dcpp1s|b{0>zJ&;Aj zp+Y$;Elud|-MctC955l0%F4>X)`bWviLe)MxwN!oWMrwgz7|GynFJ-BYQsI)Hm`qfKZ5%0)`W=NVVvHy29S#K%H&A%k;ICyUL0r)1j zyn=DN^-W@bm1rnAKc3irLr+f+-rmW_eO><*^{|_N@_W7SJq@{IgJ*58n^}&G?i|4(cMsu^d1EoE1SsN(T3|K>&h~)n|@(d-ug;Eh*q0eXbx3T}+7!rAbV-MI& zE`FQ8hb-`|bz%_xBxp^@IDle_pHAm!I^|b_zJJ zX$3M9{Ak^~52L`jfV-#*n|$-FcvWgNj!+5llgORD7~JTnu<9R`*go=dRIj@(vwT6r zX*u3W-sDCxydwPWat;P-G4l|>!WZnwRxoK_wDDjyHfeXt^}3qpUXasQw$WhpW$=u* z_!4z-;YN14J5N)`cPC{25sc!qbMl}enbDlX{J~^y?2Mb!M4W1N8xKoc%43HD_1Np_ z*VWUtOC?_gzEKXZd|>gkfqB1cTXH8GrGTfnH8n|wn9uiIj!4TpOXd7-^6Ig_e=`20 zj-=)TXj{L*z(pY3Jc7HZW}cNrGZnv=FM_aROt$-~2Wu36_F^b8kolfCFe5A^4u8!q zu^<1BOUf72c(Ac#7!7WUicfHoDJm2CDA#CQ`lak{F|^^t(TVL2gT}2?%9DBlxqn!a zaKv_(OOC#4lA3K@^;d<8!1{7a<~u~M%4_-8x2q9}iPBRmbc zj*6rp(Ky4hr4z8B7iSz;W4p&bE7Dk)Mfn>8^4ye2!D1UbEi=!@XLcxvw=0$ zLKQYbjLgxHy4S*l%FocuHKaW0l@DlWs0Ojuf4{thrULb6mrC(WothYZRXm07v;;SN z4+J4o@gU9n_B_HYeL~K60An-HY#2WZ0|SNq%>=mnSKs(iB*v$YRTQc z*qc?5I__idW=^2h7+sUpH2H((<$unTRb4FX%A6XL5`1KPk)UT*#{_%OCp2liVI}Zb zr$1HFW(ibpQq`Y>&Z;h`O*?LNGbcTI$3K1CPDyDn`|d^2$x`+fJlb}@H9282Gtycu z;{+{25+xq#=OAh9s#7(}W#4;~a4@1s6Kr}H{b2P~X3UzIw5tk?UkWny0eEwbx+d!6 z_fzKQuc?bA^g`-pV60?vZ2>G71QB+SoJ^2e$67R#jGP0*d#&R%?h z@a&=(eShuIozOTo8U%xt9?#dOMhRb-gCAbsB`EpaXI|~PJMaR{HS6L)FMH)@;fOAx ztmd|>$b{XTSt-2z`n!>S`P40eA(2#BmSa2rb}+XuYhPnWIxVxm9llA0DT`85RZHHL zA%}jVnN@lk_ROu#bNAg42i&G!;r33haoU#c_d&Ee=3Zcf<$1@{*u+~fVNQ<<8E~+j z&#SUeN0F)aIm_Y3wP8se|S8nGXnoWmC z-X0T;qePoq5xul0uGTvQz^-s=hCBw_e}))}rt9$|QG!6(yzZsG4dxk$%Al zJKPw!ba^DQc+sp3bvu>2@;5#RLg~)46J*ah;KD&$*aOStep)a$=V8M)OsP6A)S51@ z=206YF(+T?`EzW~Gx{8lrkwCy34}M%*^#->uOo|pn0U*7o>4*O!M17Z0!mcc`!<(t 
zYmIXqB2=?qR4*^SZ0QwvEp_-PPd!J~Hqo5I%4)ne-e^A8_|DUe*feDa;Rpipw+l;x z2Z4ONw>o84LkYqnGeB~P+>+IW#;J>D@H`lq*|2Fxz#Me+0xjhjT|XI_e`MNO^6s84 z`I~$sefRU7)S!T_u}h6NUvb}31c*y~g6mS;gJp(rp64T?Zak$shn;3{_ey8Y5ja5OqXGP(z*7r=8@ufePwK;bX6t$8Y~gpKWd|_=^N+PS$Wniff+%d@`sWR@7BNT zL1|O!Gr4TJuY3?BCYp63?K60h+ zh*x9bFdnO#?D=4sba_BU$_xW^A2)bazRB|Pye@uztHT3-3=bLWX6s?gZufH4Yov-8P|pZ3*Bk@=|Al1{xu*l$2NyU$svUlkB= z=)~dcFg?XRMCCPop>qpnL7JUb569ACPCG>}B%pIlU+;DR)fgV(n$J6b2JYo)aJrNHdo0!!%9;+Ky^MK`D^-WXSF1h~}tm_v2>Ezwj z^~F7aK^~#zFJ2Qjnf|TH?fpWg zx9?=8cA-{anLCz9e8=Sp1V(}h0HrGidl-sGct1HH&rh4)j zlODj1@TaY@YuPHWVQr%!WfECy294TarKw90j5OFHj*n8?u2m-k$)K16 z5!yNsh_>xYn=S#_W((5L4SZdgDg|7lprcvwE}QF-lg>A!LFDd6mj%gQgR5Qx=|N&n z`JAQmEF#UG=E|4Mq}b+sf1Y8ZleE!V%p+;0E99hV#RKOM@ZhV+A@csKA92iauh#b48sgWJ6)7jPg5ggiIqi+$}@DtFnI9T+6#T7Y>o^LHO zj>GM@ShC96_MYdu3rmtfD0GS6RU&^m`dQqd7gw5Dw()AP%e7>tI9NfEd`_MBc!ey% z@($KDR z^h09Au5|91!yC|8l$PyU9xr!f4|5X3y~$m8yqA`dks~RbX(i!UjzzOZNp?dcMTl({ z=R93j1vCQX!q9|zcawHRkjA_Vh*4Ha$<}+$U((q_93_T-v}RO6X6{C!f$M<<|2CkUSB~aT&u^% z!0w=744?fEhhhckUkF?=3u4!Rl&>#jCrwCEry0~dsp8VqdqH{(s z``;4v=`Xm;xAzlQB~atE5me$dJp zU(T~@lB?gz;SW!Snwpw-9L!_s?9;D&yml0%2J@xux!)l-KZkvHZa;7*gSY$)Z$wN` zdGwQ$>|9NIyLtJFDE?m&r)GMPGSr3}rLO|ykAM>+oclXnNw+q3q zZpAwaf{$#mj@qbX+Jq3M{@F+0=dl<>X6Mh=XXmfDz~)0Ax-PwK%YFBmQRKLV%I4M< z=cA4;z4k^d_x=@CUAcay?nGNVF?owQE2-`-Ttjhd*#i-K?Ma?C{+~?1x@)g4;((X)(bJVUa&m2YuSZ-`{5!6UDgglMVnY(RXQvO+&YdLqMWSg9#S?NWu-9k_%WjeYd&f!BMN zKR31ey&HImGB)C7Z^_Et4$v|Xp|bJ-nEGEp1yf=-Y;cg`oBmSK=-;jFhQZDG)C8LU zMq!GE{N1TO?>j{X=lB1!4gOD@;@LISl)=p@-Zdckfv;v^vHwPaVjJH4*mnQsR3W?n z-H@jc-GLIHzzF@3@gb_b5i}i zy%$$Y;eyvrr9BdGA}>HwsbOGYO+o~4{M6KxVm2!1V+SB=-3(tF*d3gmoh6KoSAx`+ z(_Lh8S{L%JR#?2aRkk1wJ*`;p`XDYI(?637-`tYes084R{c?$1*~1?J0+Z$inkzFO z&8c{}SdPj6FD(EtNxs9cgqt0Du*pD@%NiJ3r8Excm95HlMVg+TzBD#2j%9>hr~hML zMMZ^8TwI)1g){`a>?^9Oa`E%?UqwnzR8CnTNid8scJ=l4YS|;MPM*op0Gp!N&3l~0 z_nFxF#+sj___nO3U`Uvnu6+bV{kjYU71aRZcw}lyqrf;Jq0&f`F&5M00L^HIcF6cOUa&oUdm_B_=XJ13lmRDg#=!nku8wx4eYt@|+xTh!T(xJdb2l z|Moq%y$mp4^2t0j&DTd?v+L?K>+0)8u8+->(Zx^6-2Ml(BY`xPSW`dulUr7lwU>kk#2nc%NgMxyq zRnt3;`2_`0{fW=RYm2J?AyR;DYfdS71nmk|5#|o_TEd zEv@5%w`_VxV5`G`Kk`p_s+o$4N;j?o-(&E3Fe1a^t9=akN=nKyGsWK<5zYIdHUR-$ zfD`1~17>ouZn63K?M>e(^OhV`Cv;j8#=!#^VJlDT7yfv%C;Xv>ml-4{)`#mX_A{GE|%b_wn&@;a+@;Mj?oI_Mce;ih_Fy zYI%E=`T0`iM;&&Tr)!+LVq#)F=_39=q$T(65$uMjIhwIp-hdnzGQCtVu-NJY-E?ZJDq5qkUkgV{znz+@wweE8eufrY zkSP&4p$pot z!A+hyrrMwkIzXLc{ra&%x_*0l;FXJcWlC(P0(Au@dnM*wpUU0aZb;D@S)lklT8~CI zE6U0eb=>~>7%(VhV4+}(Q@hASePjp+WadNcNKjXX$4?}2+d(-9FvC0WYmo9ODW9K* z?oO3=uE9Z;kO6#ZU;ML9+OGhs_dd(pLt_{%(kOo!3m?l@E_2>D$S*962Fr}=!Tq>| zJs&KA7n|jPZw6d>W(Aj_#|_z^(WF&HZ9%qD>LaQ2K~X8-8RSQ z^NWjPOjA8n0qx+5T3asPA(kEJP>lK$I8!*r8a|?WZCpBG;HiMKz%)&bl2Pzhj8fk+GSYkWL+(Oa|3?%uPU+N@blib2Z_9;bXV>cZjaXFqch4Ldl zi!R^l=V;Ajx^b;lG%T{u^}BVwqa)b^1sPRVZvn;bdAUU1+v0=fVI9-y!*eJHav->g zKUI3+q^G9~gUcBysYFg7-4Li7o`7@(lJ(Y9oNHRAkPIpBgmpEVRkDh|+OL}TnQM&) z2e$e7`H?^nBQ~aVUq}#CUj}A7zbc{R8P0$F#ooaIRa^XR?LK(~4>6>e$O2j%NXURq z6#rH%dybs|8^}MRfUYQ2Yh~fHl!u;wgwqxr&`Afh%P09t^m zZ04Cks)pJJI)V3z0``ohFFY&Q4LdNfv4J2CNlO@H7P_ogws0Z=FKf36W`uY(_;wQVaDbtnni0c#N z67oFMUV~Ce6_uE^H&_S?DgOM)i3w>HmGL~Sl{w>IKr@vq2I5x+T1B9R3B-v)p)!Z( z`Dm)^!QJQTuizf%8@V1I8^MC1z*^~gw3*kNBQUdcRnf##K42Km4_5OGQMb#sk&meN ze(;1{h7b1iNJF6p{NMG%_nt8rO=g%~{VX$B4a(lM-X1(zo;ARt*uN#75($95pnj84 zY9@V3MSnT;aoQ3v6f&>N_X-bInzw-z%{`^&*y+#j7Dd{{wNx+9*S|NDcdxH5R)9k7 zPv4^*8?au1)JFx>W_Mn^R-`s2w$wwp(B8RFhwRHwB9nIj7FGEam(}>ZUR5B_I;1}! 
z<#p@=kqYUzFv$hBaq}eV$SHML@aI)(I;7Wtpbn1S)k>cz&F|Nl5w`q*E?Lr#bHos# zN<^ZuQrT5rzwWxn(m-K4m~x+(P}BhEGE=m&T2^(j zUN?bqBGu!SiVDrwrt{4_*fS*{*@F~hsZF_1wbAO2B?D>-QYOo4gwLxdUH&_z8-l9ApyxIK()X&NK;eO z3W%v*lo;0~^q*#)Z>lx(91igAWtkWonQH=h;`fRBomO05 zT@SixFDm2-z$w3O{(tZ8-2#Kis?}%L+w;u`Aq$I5-1&>SlXEv$*9_@<&}mycJ2876 zdu|>eIc?3k!~3sFyQrRk^M6%tRRaOrye<%-KJefGwfLbSkUks?iD2D#a&q!Ay^*=9 z2;`{KG(dYJ%BUuT_MqqUo)80z{9nu46<|_sC@p>;pX<6SX!E! z=Og~KVl@MKfg!$80Vc_dKg9yf-Y zcbZ^n*w)8JgSjXoD?=n3zz>SP-~!yFOe`Z?U_i7ho@@FPv_qzNzwKBrCHiHKs17?C zg0h6%L9vZv^imDtt0ixNjT1_|WB@=3faVq!ff6E4CH%x*4D9iM2QTkqVkThRMh3t5 zK)d}Yk%9o$PuTW8K=$lSt}DKHB3AzEBlkaJ-|I6z$SAC2B&Prtzz zgmB*RB%{O+J%qN|lD8_1UfTQ}1vncI+Q?JXvdKTO_k}wDdl=f*Q=kKd#%)9Yk_R^< z)Mh5Iixc^0_%qD@b6;?Dfb-MPKgv>n>NmM}Due~yIMhhzp!vTP7?{fZjCFOl83&m7 z^P4ugS)q<@=J9>UmijIPN<_>!|3Rz!0hx^&$&dBp9GDwS$o97*_rJ~@T^V`=>3*YN zSl2=}5Ln^8ER-rsjq>+$LR#FqMO>p_R$M+Abb(JnOa?rt?oEFVp&=0{pz(j~qn{-E zuO4f3Fm-MKl_+GU|1u0vTQ>rY35~C0iNPbokkZ#PeTK)tOHv<;E^v-!~J3e0|g5vOasf zEI9Z6-*bne0zc_P7*EtA=!_c=VLqvzmWx22g@h6y99Ri~pBW%26g65$x9H0o(Sww% zBA&9VD-W35KP`dL*S&;<5Zx`ywzc4rM}IFF{qu(v3N*+k%lc*NjQ`#F7tv1?xbF$9 zk{p0tQv^YlSJKnlgQws_NK5T~`tXDiyFJ_cjx3V#QS%GiZl;BpPSH!tI^*lZkVRg=&|9&YEVpwg zfHCq9(U-JX6IqpVqfH|$oQ`$RmVW|Jlwo)!==r81Y*_NO4skg}`K2Y6KoQ2#b@Pm` zplmXa8`}~kQh+7|TW+z``zb4xt9o26)T80(q^M_tuJ7#^$Qa@n&B4DrAqI9Fcj;>O zQrScE;}deR^beH$F0vs+^*OQ9q{rdzo@e6vLEGIUI&bxnGUodevr6#X-o5ipWQLiZ zccCj=h{Pal?^TR^<@U0}`JB4h0a7QAqYn=QnH3mch2ILObRNr27(87f-YsORIZ4eG zkGTU2N&h#+M5z$}T?$t2F?!~@d$=ttRr=(Rm|68!w5ezF&1E0b+-u*w6VJL=C# z!d!F%ZM%=_0D-Ky>T&X%z4CXgC;{4?dk$--W-3mO9-d0fo;AwWdhmPqWpW$!9h~?! z4~CS(w`fmYUQ(O>^|1KTPIk2Q7$pUlnoK)GL8${sfD`` zFFfihUvg<24vjC#r;&Xtr4v_uvCWeIDi%=qy-5p=A#&s;RIW0|FNqEz48+H)6Fo(Gv{*u$f0W`+Ku1C2axP6sMQ{CU$?K4tVaU<3BW}9o%p#{ zKg7kx@6}TNr8zShdP)Wrq)qJ_6`Hg3&o1J4BlJ@XFLDdda3O|~lIkk;z$8xQdV6@O zG0f@g6{Lf}#-^_-Au8`%I-46m`ZrwzH6#cyKf6=G@^K{wNFVUDCY_%igmv(ka~~H# zA{Rh)Db>8o1~y~WKcl+BM2?vT@dR1MPVIHL<{;o>X`MWkfip9$m%!DMB1@0W5vf>F zn>}u28M|4#ctrKP7|&T8rf*&CG&T-zkG=~>MU)kG>VTFF;t~2kFY=&}E!{;phr!L^ zacCHK@|A5Q3n@xq%a(w^A5Lb})9>U%v{O6UY_mYk$BjkE}nT;=Mi%VB?{2P_RFIZHw})5xrCNA93H&>L}kT|GnWQ6=oLzN#%ZVS&8QK34r^jBRw% zQ&?0K1c|`Y;=dw+7#WGGCvv_oG!&h}(snU)oGg;nI4vl|IFcvu>oNGfsRW1Y)crLHwWny zv!3Re4boS$M_8A#eIQqWV0r>14w)11=EYZ7yHgC;I<`Sgj5#4z3NqibskB7a9Gow@ zRNPjcMa9OmAnU|sw;%gK@+Uw$Z;uYwL28kJ<(L_^k}K@IsBN<%_zaIywX)H2=lNOZ z*C3D=%=v2-taZ-EY7Xu$!^0&smrZ7xyVs-#-L$k6<~PnBhk}4~)f^0z{7qikyzjYJ zfYd3bQRA4pMtH9?rg?jx`$K^Op6=opy`?l3j)kKn`WN zo9G90LcUI4Pb#Y1$k(p1wqNDWrGcU}aMi4=8%UaD87@BbeG}N^K9vJ%me%t;=m$tY zRAPM<50p}Xx?bU?xSZSqGiRJ0R$t4fhMTH6A4mgY{KtrY_VNd&$MCW~uU8-y9xca2 zYEidOL~?Ivg*UC27r8zX3ceNwDy0Y5`)9(1E6hOo1ZMg5DOGxoG7y~%c2iPScSWmR z#8*@43Vas0$fd-&l-PdDH(y^32>Mh*Mv`jJB?LU5!}|Py9h<0@^4Nx6>A#J1vWiM+ zn|T1w$$3~*rhHObH^{dC$|)?gPWhrCV>Wk*8-Hv`o|OMPf^aZJgUiHaeBK}NUf22L zT(OheVe`J!Xw;OQc~Q$lO4G%{IK!U-8;}wXbw&3H5C*6 zc)Y|4q$2PmcGMmQ%kw0~JuzkM^AnH#Rga?=wvVX(6k`K~Oec|prreN~jbgJ{|u5P(U>%j(RBa`hrBzV}7Kt=O` zpF?7nC?RD_zW5;ML?HC}<`{NIDM38pm;mOLn&7?D+H=ouO%Fg~5h;0Iyp8r|Et>|; zSQrq(7f-UQ+}EOAbRkH|et-v`c{pwmU$1U!qf19rD~3IPO$yXTc0|hJRYn8@G2m7+ z-VL&#_h?WR6?Ym4vp+KnS#z88QFr$*_~FX{$f%_sp_tnXeCgqVp>=jvW68hP9`@Gt zrMhC==H2~B+Pc7qyQwXloFFP}avY+Bja{1R#`1d&dbH)bBn4sDb#x$Y&F8_TstX2< z3)G?`hr=bT(qswrRMYS_W4J^!dZEGQ`K8hKlW{Ga4=EP+j`1q1g#^pys{FirQ+WRr z0p$*+dM#RA(4)PB^gO}icTdCi-S=D{>^Koh15Kltkx^0u{`4cNz0mznO?BZ%_#ss8 zW6957Ir)|FR`xN(I>ZYe5n?#bpz`{)YCIXM{m8=^_)!*^3aH7lfdH=7$ZZx*^$zo} zi)MNY$RQ89if|^sT3=NHap1bEMtR(MU&CUu+NeZ5UyK@Jd@$?iP%A|XCN}xiAj3Gm z{^w}(I;67%+M_&%vh*g*jq}T_BrxtI#8)r8$jJTg4aWVtJY~hX^9K!Upca`JwK>Dc 
z+1C4zw^8wP?$0F*R7{z--yGA!FVTRx&obvvKda1sy!YU*)A{D=N^(RIq4sp%__lUrLWWYBY%hl@Sp3+9!D-&KXhFJdTxH-s(k9 zWU{!LIQ-!P5+m{c3<`F3QK_|~9{wS9Q&8y+sz*dJk_WkF|8&cK(?{_hrNrMJ z3+wvOVWAAy1PkXNUmhiu+WS?l9?p2(zEtB~1Gd3mv>rPIS>?65uFMl|2P*!NT@~)2 zY*O0ozbC;@^a9*?;N}v|!JnawAGMAw5A_EkU;fl|Tz1>wtiyfHQpAoz3VE)}wFSNQ zX@A7D3rGf8hK{xNTx2K91QyMpsMH_JuO(TVCoe}ncWyor4F*n*b9IxTZYa4G2H*4N z^q@TqgSr(`^$y{Yl)!1+T!LvF>yVV+B zKQnFgRhiQ*Y6jqo?S)>sQkwgCRy_=|Rv;3iK(%x`87+Q~pA}R&Yxx8Eb}tIYYTZp| zn&}td+g00VCDg55gt+-+E6+bCbrkTj3E&pXkJmBFB?!cOSA!~EO;l9VsvcK6Dlq%& z)yII43c3)7UZvUU&8FL|`yg#vmQCgZ+(#PJL-FqsPXPBgJ7%RXC2@G?nrdN(R zn>v;~ zxbAg1#pn$_@v~S3=#=qK^s5O+?G$MKb8kEgo&dqB_?e`cjRspFG%MwfX<>d#%Pao? zY6VA5#{wwL9V^EeyEy1tzhYIT61kO`!G?xjT@cmV!2&W6EMR&mCDpGvetnJD9p%x1{h7FKX+F-)T6+y9^cQES z$dmC2EiZA2INSH%BV#3&Q}~H>rQGcHg$KvG>hpqUDPkk_BE)aZf!yoQz_n-`ErcVq zY4UB3eCa!d57#$e6x$YWO{-xRk!RRo!-@~VV=0GXPDcU+U-7{tm z`^awmkgM)CPd`0TZSD-Rj>B!^(@?|CFq(Gd?cp}7=7rga>4VxPX z1=^WhsN>4Y$`-&&Bk29H$W16LoT*!1M{1^%wX5ezV9RD~j?|#UU%z<&J$2lI6i9_+ zRq%X!u`74n?nBNAji?=D(}6&rJw$5TPRaAa(!& zE92^5!oK9?NkC}iOO`wmB#OJYeOjg_C&5EW5a?D1@Vg}NIN$3q*n`?PJ}uwdc@~+i z0j`OiVTiX5cYqL0G%VO*{VqWD?v;E;qTtakf)Lqs0_bYe+!p=c z2BZ-3*a(ne@AX&V5Zathp#1?^2oFyzolNW{cv4>x4k>tF3fA00`+Ew2Q(c8u?!XiQ zd@LW=D+Y}#R(AbXwF`{G;y*grRD=*$p~(>OrEjAfSzvr!?@>!)1aZhXy5ywOA!f;0D69xbW?HtuJDt?kI-8XQ0qvAZC()m9h-fgMC1bq{Gq|U z8OU%tnQsX|F~O;P;MC0Fb^H3+vCN_Sn*U4&TmS){L#Sekgs#Z2jPE{z5RUy}?ma3d z>|a;gx0rEY*3Vi$m4F41IDJZE_?P{8z<+M+(t6wHLk~8L5(w0E9(R4xeZ_rC2oFMw zTU+kv{NAvI1rQT#N#21PR)h`_hLE}fh1}3R=;aKUM|Ae!sZ?O5Uf_qr?oV^Ts4V{& zmGUuUR0&|z{~FB)XfB*!+G@Z^%wJ@m0o6JH_=bswLVkwK1xA<= z5f!!g8qYV_p)I-JzMX9{gSL{yN|s2T5oQ82i8GdH3+G|IMHP)=$AR zmU%OilG<5o$W?gjjwp=)0AEK{`~8n^Kv1CqU|Ek67H|vL>p3v6FTHXOYcVxU6g+SM zjDKVE%wb6=SSY$~W=sHpbl7lrUmuX|{tXTQ^LwuohIW8h1_0;mpBq?Es3O|d5eA*c#ORt@SrBx-MEnr;DPYViV6-HadE#4x*plg;V;&#gd_KF zizEbO1xqHh=u0Z&(Wr5Kr8{?lu@_)X~zgQ^a{N7B=9grk zug@N+_~^kS$bkR1?CbBB>jqEa_6`ihZj9#kf>+vrh#6$yt&IV4K_6#O>}{SGO(@0_hCU0XWnGeh#Ctv9Y76NnjLoX930v$UBSFZlg^p zDX9%$3sUB|LXUUvz8(Flv{XxyUt1H;ENwO8@GPP0X;%<9%i=Gcr}GX3ayc3zKvJFo z&gzUgj$z)6W3_&E_YyejBcQUuuk`JJ_)}I^QeqDgH}nGz1(}41-F7&ogiacAm|d-Q z&KAMu-#?Z?UpG}%RRtRxn=04zU5Azu-8|k<@yus5H0?B01nVM{?9Nx5%Z~MEGu9`Z zVv4uHz`%#V-rk;5opwnf$ewRFh&~JjhXp*z2COABY=qgF86$P`FfF{ z+3X9uanI+O8Es`p$13nL8X0-{Fz5rkb7u_!JQ+AwQS%B<47}_nD>qjHXyf=1$H&J7 zUJOAQhlr!EUJg19XV34%*iPzG{>f-%z+@Q zlY0^^qWHOxIc^JzYR%iXZ)@r>PpV`N|9ngt2VOuVDClxvq-D@7X2w)R@$BbI$s*pp zhVm|b)*0aW4Iny_866pE`t4Y6iiv@tuFb3_?&*20|K?44-xoHj#=Jbbm7GLtoIN!l ziOhKZI3MHweJM-JLhV{xu@dcCO7K8E^p+KE@FJ(Q?Ckd0xw({0+gfTWs?1p<_R1RV z5`~NN^DrzdtW*U#Iew4rpQZN#=WfG3Flm%gXI57`#ZK=uwHvYb4|I2**-A_I5P-2W zf#=D6udc4R&(6*~oMzrj?0x?(WCC9EqzV4ckLvj$tt2p+*XZadZleZUCS8!dlM@25 zGwbmhPc4D<&zkBHK>!DZ#RIVxn8mBC`j}WkDvf3n_ImJAEUU6;3~|%J0k?riBu-E2 zr}znHlBEpZad2M@aF-`U^d6SfDa>9csin!i5 zG&Ho_SJJpZMv(dNE^1&{7@2mNk(7|VeT4ydhsFs`S46~v(pGUY^K5B*;#IYqKSsBSnHXRaDMpm*ZQVJoPa_pJC zB}sNdl9_#yke%(=M2Kwfefs_0&-?fLNBLNf=lPENd*9c6UDq=^JIf;xahHjjI=VoI zPv5Yx-yo;w3?mE6FL>7A#kE#89x?_&Y%7}&bFtFynW|BW@bGYm$;d#lX&}FT(IC^= zv%x(J(?Df%8C`JFgPk6~A>-o!PhAc?Eia4Kur8fg;Lm~HG@hT9G~BSzI|MHQ`$yx? 
zRC8#ZZmv!bfxwGCbA~rED9Bgx>8yg15)szlKbYm?w3b?CO%2Oq@BzAXrp5su0E@k& zA67f?#L=|0y$ZaA^NWjoGj4|%h~=ke1*g7n-d4h*xp{dPk|ri5*ggpC`N2m8wFhm$ zIL!=-jMOK=5$tiADd>XV<>!;Z-TOE7c+Y&LgCuBmbz!08n1VUDSTHzNR(}G4g~GA{ zRq~_wOQx*63GFm6>bH*{Ki*GHPA;&rx=I-y99+W1$@y*yk009Jb`1&*J>idGclk6= z1zfRr#l?XS2a3M*lq>p})WBh3IT zwv0FK>-iX*)BOI^Ue?GHYvDfxkNeGjag)D}o8SnW@=(nw;EIQb@?`U0&X<+OkA8QOq~t z*WEUHCy9xV&Ye3qQTe3qK-_%cN46i(OSKoBq`GXxECK`f36510a4VW7CUOSG#)T+K z%Hq=DUp|m29Q?2mOK-+A>~O$gMn*>FVZt3F`wTc<*OzYGCPVeqRU+Yw~N?^gy8DKL29o3rdN> z+JmjTaxyX_T|+}V@-9^euGg+L+m%@U;YqaGy}-_XRoLfnKMj`65x;+B$%<#Ka{;=c-e1K#jN<7oczgz+KY}_u7P_kiq`J=S3F~4l=QyRpd zCO8y%0wwv8qoW8BF>xph>$>9o(lbCa$b>xZj-laKVe%eT-UA=4|lc zbL@Zx|7^6{?WvuiA(PwT&YY@|QR<*M`?Rj5B?q&tJ1rQsG+b9!P8Uv6e-a1Ugdi3M zOwk|rgXLGRUYXu0?DtW>d9(bY@n@$1lj6XWWb{ESOwq4eT9hG!(Y$+iSj*5bkM>q? z``54EELz2rCV37$H6HdC;4JQJg@dV0Ud5v!>fc#x2P(@^==C9(Np^X8p9zRoH57sP zQ5eR3z2(HHvO?3k!=d{6dSm)fuC(_~yB^|L~1L)%v5E!Vx z_4jX`^ZGO{#-jD|naO!aHgLnSg-A~9vmTQnlp=0N+Ei*gO=)=d?qpLi%?W~IoCKzA z3*2Nnn;JiUa|Jo*_>^Qc)_54D~= zS-tcY$&maa9S93bV za|c+gXD%6^$GJ`VGkiW1FysX0jj3Cx@L&-7G!YXK#e#*Lqz!Q0uM)23>IRC9y$Ck> z$H#ngZe_@g3b@Ev)N?*1f&k!77*SJCpnE2ooyWN^WG8$Emo{&EFf;QTEP0jtYau7& z(A>aau$2w$$iBI4%m%YiVDLrR*(bdZH!_~xE$k1Aj*h-<(JFdpB5V?=-|$=mqNVT` zrD&Il`sZlKMg?4$ZEQW@%IR5|4p_)hRbBc;5KblUgxCHO>g&J3$O9ljQ``nz4|2 znV9=hf8p>RI1#-7=9n}sRw-LMcOv}Cm>I;pitEb;+#!1T+3APYDss58p+|qRBIT&1 z$+6u2pmBjSPzKqr>V2GcH8SAdP+e$nuz7RDiAOpA~pA3AAXy( zr~PVWaPc|{c~w%hrqLI39pJP)HhcYt8Sk{KZbeEaVuA zZD6Jc3I>>qC&b5l`>ArT({*k*BJUy64%6miR2JX!{b5lTId+^kyT4OzZ+R&`KEBQe zh~`zvDO@|bnkp}nQ&J{DtD`jlT9u0ZNlG6H*=oiZ^=X>PCMCgyCrx?iXenMj|LTrX z!5z*R{3is{1}dv7izY013E|c9gY3xnU0mZ+aQdq2KkfK3TRop3kQ~n zHBu$MgfgDRSQfp19|e`}Ah1#oQ((zgf`iM&qE5@!pxQu>y>B1^*mAyoyQe+n%9C2y zLVS(}sGDIRl4yh^O8^ehGX`|K5in6N?nd)Uc+p{e!8s3Cm569 zNQzA1uKxa05IKzC!~ms6Z8dN>hajD(Tl?^NT3UI!bZ?8`C`s$&;=+A2%_IN72TU|~ zuZ59iI{XD1?4LT&$XtPXCZ3*wq00M5bV~`fOrVIddLJBEb3YCHqq8z&j;7W z`xgz~D;(h2jtpuakA4mgr_NlRTq>>$*x&b@BUnCKi41v{4pq;MR6gAbe8jyILABG; z)3*vSc?m%$B`5E$wnnh*{as#u)C@;MwzT=Vb!i~yPCRvVOjXNXmxpk3P_(z9ok#kh z+|W>0cT?fA5o}2kc9R(OtH5Q(LkOr7o`uTIW;%L+KNcXV!w#Mnhm1lK$DB*Z6 zXk(RuyW8k<^w*~cq9uhl2-N7n*fS+GB;-CVE$#kwMa9NOYM@X`X6sP1a4?CQ93{b{ zALaDfdVy`C7aI=`&r^@TD>Gn<#w{U(ZBf2)gA77@;N237lShYpt3I4DxCU9S#752- zEBB}4aq3T!-eq{$L@z8tndgsaCZ?v!;K2;8ZXY<_hYvv0@sXfwi-%*?pNt{G^bN|b zdt`(h!mS@TBRZ$cc^N`L@E-5$E6)MDRJ=!Z>c4rT`kMrrkjDri``*(4d7L{Q&beZF zkV$^|?^$-sC>G^K_|^+>#T)7V`O{YG0_`4ewg>QhS^G8AYA-cAN-7`-`S#~-IHXsO z*ViHSbzoxanD)iYbqzw8XS_C3t^1~}3*;OkH71ni=H`~HLKqM$0vZbjxT(@lXT$vF zb{-=Vk4p?F&V;u#Tz9jJz*}L_bBz8WZ_IV{>jI14te)~6g~jXRa&&l51>U>yHLz1c z7)(81VzJmTpEsy*ZNl*`=a~qCvCc|Jky9%S3nf8G#pC5lg46ehL-%vE+g~k{SFc~c zMj_yLqgQ0KW8mPhR+=Da?7jo4n_KCUPq_(xv&a9q%g%4QN3B~YzP@fL#`MKZh_JIo z?U(v5-iP#X?jn7=HPrH^fXR$7H&#UN4_#SL>fHMVVu4yB-%Fd2^z85No5GGfz>9Xu zh({^i9!9jz$AK5o@np;{ho}MyIEiDoH5;oMfc6R=|yY7Ut2iESLV&f|1Wfy6ANR{$k zs%pAy%j1vL;~E+og4Onga7wjIFkvU6Ssn*%-%#h6UL;&9#9l$F;Z zgCyx%+I-b#%eGkTJY^GI^7tS;ff20PuKmI8i~PPw%TsN*wOl+y7>&8m;~INW3#i-lx_XFuV2UP=ew$>m{?HVW|Q$g5rTj|=hlS)cTz7HGs zWSgzUQ@;EkTHt_hF#|(MJ2SIaM_}$K3NxF+7?8WTvLe^$U=*jFTd)HMn=Qy}dn#Tpd1cLBR+?l`T@e@mieeY{$7o#<68b#@euk4;Hzwo=+eKgG+AR^;GwGPT`=7i7 z1T@suQ-?=KS$x3Xc<1@^=Oys(28x{wToRRhe0&U4RDv#^KmX!;$`uV(Knzmg^zu@D zhsi`Q@T{}3u&@9qvQiEj^q>9x_v>I>A!&F*=UC~fkgYxX5@&T0a6$b7y(L|>2W0IZ zP6O_&vXk2>0}4tfq&}hX*EoU@YJR}RX+ROBt;5%MTRg>174E=BT|Mj~ z6n1oMY@IEwt%^oQ%v=7qQ=2WQ_%QaHi@MiAo19f%e%T&^%`YhEEHpGauK<)bXC-o{ zSw5_8%_}u`xzDn0Qe0bPEGDjOqkWxO4(Knf3YZ0 zfIsYI>fZ`F7+8SBx_r7m5JaMe9;MSWG7e_x@I5IN7@o41SOj=*G4+K*T?E8B*f`fxg0h^yau0Yhe 
zVJ^`AU6U`a^u+Pwvu#!%T2btx@D%TQ7RW)W+_UWB;j5j?OioJb2zf)AetsXCni_*q zRJ;d*!jPaKl!$}`N+gLK3cQ-x-J0Kn!%{0tP+!?gFarj?;;~%&t>c z%G$xPxY_yHv)iG~7_cQ6JUsN4L^R{nE(1{LaZlvV74{}|-d~K&9=`@a@IhZlct-%U z*yiTu=#)fJjJEBp+hK${w`g-yQyH_AQ&37IJ}0AO!s#wLT9k!8rda ztbPJm@*T=Q33YCog_RXOkpCyqkzSKV#GsCs%J2KBfO;?~vGVgbNwba+FAOy{V_TG@ zeFhKZ@7oiI=PpqqBpOi=O1dDx3I&)qxTaHbV6jw~hFCQ&{utbBi9o&uBeyU5wFRT{ zHI&wI-!9r%ZOP6JkpX4iI0xp|b(v05QwY@_co2wgFa}H-#LvM0i~$a$;rH%Dpr@?v z^sDl+aAL`i`?4U*vD#;|ds{ZD!|@U+jg#+T4q_T7=jYtUz6pgT&AExTf4Cok{{{uq zGLJq>tKYx$hqt^FJWEVY`sSynLt9LDwDPHSEzZgiN_qczz$~~ND`g_^pH2cGBb?vI z#7s}$pAj3|Yh60@*5BWs7Y?CV%*nPIrZ;-|S95i4{nn6hxR6b$Da}^ z#B>e^Eh9@1`{HE0$)=luu1y&o``^XLoSF1~$n#(!*y!sIn1LC<(@*D#puwdM&@*)X z+T+w_U}G!;q#!GjR1iA1b1MSB0(d6K^0uUzTSvLDPE?qmEEKIBlVq&kof#3Kt7I|i z>LNU+)rELY6CT4lddNE9Nm8ZUh%(pZ%es9Kt^0?E&$_^o--9NQ|Of%)WdN?l{di)oUWH{uOThNx>9W zBGBd|L41$NT%h_KX(gpBcHchZ>ipGcc|oY10i=AR9RHM=C+X!3YDZz6g=77U$$|2} zZ*zKa#b}49a$_hPIB9lB|DM6Ja_-=d69$XQk%tgH$^U&V36II|)7Hp9Us&Lz;S{5) zssctuIY>_1hToej_JLUaT^F>LLLptK$}cRu__N0aB+{N4U3f1eBTLT3ai|{#iBIn zXSEhYxas{QP(MUB1~gsMcC4$;B+o`RTHY9mlaU z99QK@I`_qK=ZVkWFoZ%j@9jx4>1Qk7KwqjFzO(_2P%bvFKiwPPw?9rP)JqjY#X}uO z*w&(|CI>BfXm^w#W0P?`ASHHmPSQ>^Ze^)P^sagWL zyK^vMB=1(cmbiiZ)rl}@pwOL$!-UJ-oo(_mRBEp`-MwP$Pu4r9fvUeYJw4rIc{lxT z#ba5iAG-a}rEUVKg_Uj!Y$g;qNbHbsAX_wL<&-$2vMu#ga2da3sA+AZ*XFfp!pJnB!F3R3HNfHB47<&SazD`-yt zK6|omyZMqFHT47_U*)LwD~|>>V<#%ufpFI;dHl$EV9P+>NP-sIz3(9a}%?N$(e zeWqO%5Q$dhkW6sofV3@K`R_JJH3|1W>i_cNrs%)ig|nj5OcxdY!YYF7$wt8qDO~K7 zY|^FL?I|8aF*}*CJP402mz?jkLXxv|M!LR>uHG2jQBEL(5Ltx%zMy-?#%__HQVg>f zuY;zA*Vu~2XSh#K^As4(97XhxOwLus7QY^0=1j=2dIX9^u!*9F<1daQj1R)tq$MS} z>8OZ}?x_#d4m+P6Fo1@h=?s%4{NU@ZXC8~g${~%Hga3RqhIc~Ed+p=C`BK<@X|+J9kI<0m z0351{FrhWh_K{4jHpzoN`D>3M_*y$ljRKDU+-^}U_g?_e^H;XjmJ$?WI=!jGlJG|s zqRDglF~~|{mq#i^=KuqBuO)<}zwFSwtmV3I1ZR2*VUL>>KLI*{42ZnPDgYMmpXz19 zldf`Lt&fmw*gYQkw00>yogu!evweZ7Q>7RZZ+fqpuW|Rl)B`L- zOlqOG(T`VPW*7E0U>^IpA%0F};-a@a#}FRlt1rUk#C2PuhcztWpr21jEWs;aze{Vz#5WZCEgi`Q<1{R`tG)&b_asLH5)hv%wq zvC-ea;G>38D7ISm5fh2I?JGdvSF4-?6q~)hiv`lze9t%2}aV! 
zoK?KsaFlB#Qk1Eo?A!)}}4q&&j_Zskor!UsauL9UDeSJFn=AK>oODl5u5?v)nW z(ett4DM!bzS-W1m3dcwP?(lR&h?#QcO>g#`u5om9i~ksTJkI0o^9Y)%c7O?QG)0^k z?tK6eBJWmCC<8pWEylD2g4ssn+l$o7cdhT=XLUA~Gs$X~wG;jEwCJ{Qr69wu>EBQO zvr;s{m)6L7+T1+GGLi}7*KxpbK9}=$R=3WA(`QaX93w>M4%lxr)R+I}Ua9Ud*8^x_ z{%56tVOiVqp(oIo(xav?!Y4q*iJSYi$tr$P6AtqG1C=s?@`8?~zf&u|f0d1xo}T75 zuX;IF3f1#ZXX(tCP4U&Y^5WsjmWCx3&6fX(5aG&Cz2Jd_2{F2(-1I9U{>vF_{kvT4 zj{`UJN{{jNcNJ~ZRmlnm+aMq zH)ees=MxaI58Uv8KGiWB%A!<(N)4H#z#oN`4b+N8>$73YZyCIvQMXW+}|6AWzh0Hn(KxwAfx4k6ms>a zFm&sS2vLPF*m)HXlF+_-`Cc(-$B*C@^@Ghnz?0jWQTfM_`a!t;`C8bI@|Cx)LzkAm z1`Y~i%9L4(V+bq5--LE}H&`?l4M^pxrkc+wl;pz_1D6mMVkT(=I(IQh zlg=;jC#}FuVAK|>5cD;aTh@mU+N5mkL7XaC-TSia(^q1DoJU+s^YRw&mRMqLGwAuF zna=m+MOExm#i&z4i%Cz2d_$0ZS}OZbS+~u;&T=B!T?aC8bEuGY5fyIiZ|G&go3%#I zyBM|kfX|YEj$lF^8Pch#HcnuF#0(0vi&4QHe4LzK7sa%tz)qHU#|X&$OXV!LWb}vA zh_^cLL85lDK7i}oE=H&wOjh?BDvU{Xteh%p2l{Soz%F!#hu(*qi_(EsqgA421jg1j zuK*ZZIk7*~F)9+sG|uXSjNcdg>&t`ab*pXL{e0v%A3}S)o(JB&LiVx|Lk(5NbmV(J zF?S$X*zBy&IFL62VV0Zy4HL2slS47NN)>#wc%T!o5{$(VC6Y1tYfA%&BO^SHHY!u^ z`LO%P^!z;oRcAS1o>LI;a}d{5>l7uwt%Bl}0kXzDX!{*TqA_Aj9j7pDr_>fsAvPy{ z4b>KS0FW76%Ty)K$Jpe3$?KXAvz?&a^+vJ3>RIT2%3Sdh{ecQ;Kqfe|%YfBIZvqv` zl6mp-E*l%MWF@!~GKe5PClvRFRO9byOyN!^tHI51^B0irP+U*YI2W!gYAr!EC^5oB z!tNjYBLmrc%b5v8E}70HM4?qE?6Jhlpi{nT$U9D~j#S$BufPK94L!@tq~Ts8 zH#liOk0=l$KPQVb5wkb3dZeZfP3NOU;94bctu?~6;t8CuieYEuRaPu1&YFmvEB#vR zk74r0-gc`Gr>d`d+Xjb+>jHVHXQ#7sC_hYWFu{b7JnK@c%CX3;JE$vJGVD0$C>lhhJmidEMr z?naAeBIOM!m&}@7?td9n@6;lZLn`83P{k^rSFgx0NGR=3UCoEL|% zutTzTZt8c35dp^C8l`%_I{wL$IP}!+p2#r;4x~&n?fY(Wa`JsGZxpsm(OlPNu|6;A z2&i!ugvJ?Tw%7~p#bl+9W4=L?O@Anmg6En1&NWyx8DelDTNW`phD_%Bqa9a;v&#bS z4Y8)rxgqw5ub=iJEi0wZ>V(^vDlZ+YG{`nMczJPdKwMb?J1x!k0{jqVzketoN2ZIy z(d+dh&Wd8$}!~i3#%4mZGV> zahy$swC3X3T5XB5wKvpOZ)h4PXsNAUO!HvS%5i`j(U>BTLzmiR@X%GPa6H z_Q8w{rYM84B+D>k%=x^1f4_5`>pJKEbN)E4%jLzq@4WBl^E~(S-1l=&^ffcXeY_&P zFc@qf+(_RN2HUO zMasxy(Jg575U;?n4@KH}u&U}K;-d}Eqw|7ykyB5f>c1r*JT4u&^yNmA9**a7dOODN zY&&E9Ui!}V^xBf8m*<*)cDk1^Si-2{k{U*Q#X;UK0dSkp*WRGAp=77X`NrK7}bk&h>bh8J1H^8($x%234Mi{cEr9ktai3uUNUp38Id z96z5o1D#jdcZB8lD~qM__*|RIswWnT4PxBC*AJbKb+b0$&tH};Ww<+xJ(v1rCRXh1 z9`GRnimNF(D84>Cx3w63ZAwK@TI&B?=A6@kp6bH~BtjDg6F%I?9N2PxL82I3yI|-@ z@z<&B#8vE_ucwGA>JCVA318CnC3Su^Pg)iqBcXVw`Dx>U#&;GQ3v+oL!PN`%Q{FTa z-ipA*?F)S^{$@!7h$o)r9BODd`=ZX$*92WOv+Pvf)Y;&e?ju^pFC`I8?%285=HbRV zi$jC^;8e_JPM+t?KURUOLl$jlMo*B=U?Lqkp&*!a{sf+SbSQ1PmU!>Z^&h@?QJS;AHAm%q{2Suj-BD+vAD}A4ixosP6Nprez6-oxf&iAZi5-QKV#b0h!j& zVMY6T_jvKQ6>V`O>BNeFW}ebU6qhL4ZCX>6Pn*9SvM@**WV4DK%~zJ!hpf0&ezI}o zsJu9`KU&OaXtUYMfsMIMqdMyPxYGx#XDZyq%`E{^;6d3+b&n3 z^)60VGZPbp;=6jAB7W|LcD>L&7Gv9_<82!>$ToYbuDd?n77ErCvGT3rIxA7IECQ@p zw^(96*v;u5d9;Cvzy&dy?Q9G=Nka3odtCbfh|TPU4Oa2ZYDWH;wnWc>+R5N?4-gv) zI!PL})AdcNA?wtPSlQ9B{v{DKh)%Ems_Ny2Jie*%vLjzblE@PfzMtu}^9RwqS2W^* zT*SZ+*M9RvDeDcdrqczYW2y=@Z-^IZtObfxJ~fBB14WZa^ZUBE&2_z%VrKkAhiOpJ zcw=3*Vam!!)lDp_UZ>HdC2?U0|&=G2wz=*;*w#+bzfwM*0gYOdnZw8fG} zrK4%tP}DEZO(q(IuTIvtJ{#be(oiM^x=aKAYQz0t%eF=T%`-cb%81)xI*$DQ?^iLN zK2=iF{5@O+&l82?(M?kzudj92BsBOiqhBJW;PJLi-+oMB{MxoS>Y$1gySj8UpD$D5 z>zC^jcE_)G${8+SePB_c<4v(@z?3)*F{?gVjTM@AisjIcaysB_oGE;vDdDhUgsO3+ng?iN2;1l2$C`8r`ZjgHpbPIcW zFw9+JE>@&4|Ci!Ly2T5Y}EP|tlov)YgYwrtvOYktG^xv=;5CX*yd ztrRFBvMOy{$@xdVW~63=+_B#1D`Pb;VYTwlTmxHefX<39=Jvm?-R?dmk>@II?h634fOnj2+9o);pFE0PsCaNXe>fNmzi!beml^Ww%i_%V zVt+m7)3uk9msocrP#3dScXwx2GNK#r=|(U&j&~_&OyKjUH(xA|u0$oOG8RL&14QNh z7ln|MI}v-vTZFDH7P{JQcwtF|or2cy=bwHYW~S{-3z$sp#D5M7Z>m|jq$(ck`B6t{ z6}+8Z#`#i7@AvwRq_h2TgU4^Z{oXc*nS-AbvJk)xskTU zuz-^~+ZW)&LGatC6KjJtr#2eA>9!6Fak14`2b6po&UiY%KO1UfV!MzS@aGi{3Rld^ 
z4MT3VYV=FJMV+>Vraaxn7t3SmwxEDz|LuN+|0ExDL-~9>g2QuJU81scC{2;X0B4=< zRYe^%-tBze%2drm!h*Y(Lm{dsl{JRvU{96?()^V&gntcFg8=?Ym`BKz?Srfy~%gPgT9IrSo+!Q+?KMqKlW4HeShgGv9nTuocT8DE;iW?FkGYaf~j_qpNf0tB`kRT(koL6sum!AkKzp8UKxiJ*GrZ2ew9!{zD@c7NIjgZ` zE-cYDt)xiz^|Q9v-Y=cc&l=+4UZ$kEF?KNL_o5!EUxH>4>i_JIOdNSSr+?+1>BM4H zTBa7>q+2w-UgoX`H9TU zfhaTDr!P=jerx}!AOl9ll?e#zbTPEC*UjKH!bLV4pi5l1)%EXd+Ovibu@Uq=j!ti& zb87x20(%9rxKo}`vjH}f)m=*C)f5Bib5TiW3cLyy`J)7rPz3gL?@2&GA_x9`!(+E% z(drUhbqiO2@EWPszjtx$)s%=jtMngimf- z&?988HYp`sTCIG5Q`i!vb94siNqQ#@M<1ij=ig~ zu?6KFi=V8ltlU$Lrfn8T)s}>n;WKc2s;a6P)(#GA<^`VZ!@iEyS4rZy`QO^;D<6Kq znr*1G1cnsGfE31|#wsh6+08!iQ2Cm|4~u;9`K~EU_>yy(-BsMB#yBI@8)O`_N zdQDI>C8ccTI|Pfo`J-70w)c3eupFF>5Dbu?5{;Yad=c;nZvdxxG|_}Bh;ua29U3aI z564uzhb!~rCPpX&1It80Py_xFd|zwo!sAQX&vN>v-u8Jcw~>cS;a2EOQZX*s#my})UjKyCP#My&nAW;fKZ-k=taA1sFDL9m zHV>>96~4AT@_37|;1YJP%Kqiul)z_F7=2P&jzKI@y$|DsLd7Fc63_8=@~u-~B@J#+ zGG>YM1&<#;ro<}iB?sgq3|Uwwl1k4KuTGDHlcOU>!y*jX>4`FXcI!PGWYKH;=Q>xq=!_;0_@@AY3J}D5r5sz<^${>x5gqvX7_+S^X zP-l1fiJo7g=X8FLZ@M!)4v&eAjV;t7qD@V^*ng(ATzBejjiBilvH&D%{Pj+`TVl_k!!J_K=4evC6b6a;qka2PQn? z=kISABdk2`1*eE2b>9efbnC|`>)rA3iMUHCaYQpGg%gaVW{C5pQkYJJ!|Ca&n~(Rw zK14z|e)*Z6LPkQ`siLwn-+cTY>AA-55B5ZiB&j7B;lKK(r$}ck5a+ew@l5v^Smb76 zR+io=@T4g(U)CDonnu1Hhx3adBZQ;%-(W(tk)4zx0#|B!NysUDQKv_TIr6b_Rsx*X z66FLEW}`1OX5^~lqeqW&q(;Y98Mde8dJ`(JtI~uz9=RpCXGb|G6G9bQhm!U zk1}sAZse+ksi|pJN(a%G9~-Du-YBux6I)@uaxox@Kx`wW zd3oO!+|@`|m^Sfl8XF#6O)&bY02h^h>+0figO*aZ1;#Sd5bM4x<__Y|+z#dRo*V%u zv163AVwKh1TwKx*#TumSyX9<^N#~3qKs+jUb#-M)yo27v;co&-9e(y%3aqxW-#OLM z6Vm~3bAy~d+A5w0HOD`^>8R;R!gn_A<%9KVg@l9%inY~J5f12>kT1giwfJTn`O&59 zBVgP01fq-hyaw>flPGV!8N#>zq~qDK%G@K- z`XUdW{odBIl<6l%w?}sFJa_IK`uTFkYvfkg>vMps+}fqw+EPRiK>6gRBirIeWu|p- z5jIv<6*=k{urGy#n%-C_8t;GU>CsaH0Vd~}kN5RfZ<5R>A;PdKBBLTV0@{xt=W)v@ z)ZCv>pFS;E+uBO-X#$;6^V653Fk!?-LYkDil~F{*W(dd*De=VRq_NfD*`cy5W3T5b z{l4k-W2-x1Q?CH5<^C4BmCR9#m`>=$t9CMH}8$D3z`dcAel=YL3ZwI znB#}_mVw}y(CE$pKv8Okl1NZ7Cs`mn<1c6TzRjd#j)}xZROjYSqA#`$UvWy5Ld^-; zHyweU8Cv`y1be(6d9{j@rqtK8m>=zh?@tv$-KXv66IVrV5PBpCVkDsO>3 z-v#7f;Y%^fWJKY$lPzDm4t^*4+{ee8Xu1?P)RnT;TZ@uaI)sL`EpRVx z(ACkwWSWJ?v2yV2{{DXKMwH&hkJ2rVhv;5jUJ;WTX2tj$@Mlt>>Px|30?sb{pl+7g z$;((=J82Dye>Pcq+kC7+4th0^+nQ=9#c4-q4f>|XF9&jAX9i5u)aHsnTVUPhyNW_qH}Chu0%DbAerj}UQvxO6CMIE@5n*|FF=XeS)>MZW zWp~393FaWf@*c(!ng$03t{bzQ;B*PifT{kAZ9K3GC!o~N{_NzRn2`1`Rk_uO6-q6o z-J5*r&bLA-BlY)Fj5KAY1ygSLwd*ZYd|}51A2t8*En$pJYIK{lraHli#+J|Upc#k{ z%?!4X60ir!ImW9oumxEhyWWut7UxdPk0nXit==E;f?(Ci!>I$BGg4D_&q1(ez4&Qd zfWC8Q<0AGbioH5lYRr0zKhPE@URR|Tza5s}DvSgV&LGY=lZP?5d0apyUPv0#jtGmR zhj0a=_0=3u-ccF*EWUmF#+9CyhA;H=^f(CzXv1KYZBPk7Z1CTV84<$|ljZBIa76?; zu)6&Nw6;5P@>0moz|WsQdvY+^FyUFn+MhV=uBc|GATjzCxHYcChC%C%Dk7wSmD{7! z!w$k0MWuVGp~u3Dykk88(ou)qB!Il+@&2R=)Iz1Ea*?m^AIrZS#ix+G90A9sYxs`G z?-)-7VB6}Ou7i`A`&L2ElGah%-?lySBY<;I2wVYf6cHp+RNi<~z)})GpdSLjA13)l zcqa(nWh83OwwPeGw6s*3ynGjm?(ax2(b#)D;^Oh#xBO-m?mb6OvqAgAx7-HUjqPq zMSsWO@e4;qVLK31#4CwF$KcDMp{&BzRJ1YigK&i{$=lP@^L!><;fekU^Su;;N?l=g z_U#NjrI^tF2Rzd*O)wA&{~PAfY02Ge_A#q#*Pgo;bDD4mc^~ zoE+hEEPfx1=d*A6IY$cT&4dP5Zn_%xbWqcH=>U8QN^bpBSAGKq6r6e!E#=L^H}!og0Y%oj+`?? 
z`}o{+iK#U!el#pKovJ*kjO^qj82#hoCQ(>YqALdaI#4SlrwhG`Rrp~SUJFA#l#%@5 zXK)-LnaR8;1zKe_c#gD=NVNXueiwQ2@F4Esv(G2xw;huPhW!rQ1_4q>Tyd_zs;H}j0L#T+YV+MxhJgC}UtSPF2q2UwJ-+zCH{Cy0 znPTEqM&8kA@a;>2q{NGWUg2}2fsX3n)vx7v_v|tEcdTv+9Mwd2_TzLFPF*G`Njv&} z+%9w#?0xY3NbSPwe&vM5v-c$H0Y;=ksYi<1R6DcSt96TtZFV^Br|wt zbl3Mg)T-h>;U%tAJAyq3745y~zunqDp3@JXDNizRBq@xNNbLL`&7-vL3@s;6G*7`( z0D64JedXcX4SSK5<>V1lt0*Zskw_l48ul4O$Jh%!`%LD`Wm*^k2H1G*DEtGbYnf7l@Wfu{uDt5qn zDW87ats<;e;7ay9x+LLIS9`@WNqI6V$1r~L*JM{({C43JxOe&a>uC%kEhV53K^fbi zM_%3v+iUed3y`Y%k=i3?P;;ZW7(!JzXz;PSH8WSs%gZAorh2nM3l709_Z^S2hug;iXW71eyZw*5YX+3SU<9ff zj(9^paS?R8LN2bZzwTuQi$-@18~_!_l5rI#c{+~=w)N9rF5q02`j>ZXQEpAOlMf1} z_RvPEyd99e%F=>h!HvC=mlr+zb2AQs;_)~a1j01vNWDQX!XOVXn0&1uH{LGhxFHb3 zQawwPpe>Gi8Ik$w?Afz6#-WC!Hb+W>29!9>{80CSIC=8mdvJ+$+$Y>kanR5)AI9ZL z0v^@`vKxK`pkl{^xSbz*eW%~1#05DrLn(nd+1EVk2&*FSJfl#Ou$36^qwZ*f9H+5H z47=lE+taacRo+xluZ-RMaRClMEPy>e{+EmXbLluQ@Ah{!H5y%3vI%?s>je;AQiY#w z9jOgjnv}fQHgE)x!77Q;44p7FlrPWJq3S_S6u(4-BMEEtHDy@{P6HLIXGsw-Y<^G@ zCNJ<@$pV1uKmx$(NXZZbD-!wR5q}fNRgi2_VzuWZHtuOfY^?cMH_yY<8hf4E9Bcht z?VXt4#-1rP*4UA*;5LJor}`Ld_C3tUrTRQ6js?kn-@bjeAh{w364TPyM=60RwGbR8 zR9+>8N$E&WJH2}Ks(Vp;!6cL3AnfsaN*!7+d?1c=@WWIWkw~lu@n-Em*%{5E9GGZ} zuZ9kjlah)H3JW_#r&3`so~Q#Nurrls6m;&N_n*{9p-@XHN=}V$n&OlJkw|DwJqy+( zTs4UtcdoUliah2XQ)@wL2IzfE^=g)`4h<3J=;k(=C*?{Q-N?mD5GpH+zSa8E_iT9_ zwZ$!FJ(;kIfbRjd*mhuGV0L-|Nv2TDYt|>zD1aFB7*{q`RjEPW*sfQq(pRori7#=e zo)_dmDf=CoP(R*}MQ9F^%xxST(rC0c4-bzT3(|RE*e9a9e#i<$4>+K(@~3nT z@?vV6;%#khQ`Cd!Z=p*BH5s~=rEl9SLQHNLBx9CmDzH*{3)+VQobqJrPX+S*Yy zwBh7*sBL|hd#p1&AE^9Zt$#jK(qB{HvC67ApX)$TqM5L+0;2g1u$2%gfpz5j1n2idP^OWGxwsd7Svl$^CMI$lEpE1X zub`!?%K9#^pin$jIs~FD{y)pK;Wf=s^E=o0-8?XGrD*Llo8I|SaL)Kg=GU)ZuPn|b zD>|}_Y;&Wb;5ZFVHnKVMy0J0hQn}p(*4*a(JFq=u(pwXWm)aqJws0f%9F`xtVQ*i1 z(*A1PM7n3pDQlGTZ*uq1=34|nUq#^9=gVV2hbe`^RRh2u52$Vw1e=I>)^gAe#cxdP zL8LK4#-c#0It`)vHnGz?vH{Lj;}651LCvi{i^yfMSi42l?%1VgWMJOcp*+UI78*>T zK=2uQf7{ocvNt*)C@b@ZVGGk9fk6D=&dc87G!D0kmVz7qw6-|m4U~xv@z4CQ%ZL7w ztPRg;@^C~}@=b8#9v>fUPn)1r6crbDIo1085{L3r2Bq%ao$djLx3kuQtgWqW9fQ8P zYJmb0W>p^a_U+q4&|}x%ZhriEj?%mQY;?srI!#D)X;%kS z7JN z2=K_XyM0%L#D&#;(lM*w+}+o!_1Lo&6|J$N?n=a|H$3fe=aH>3DT3mZCY{D6{kA#z z$+%(1fW9j`SLdke*%d?MNFbwSOlXh;M;{Xqm94L@Gakl0Qdm)M^ZhzJ zmiYBS+@rY`HE^p5aTDQy=+U&|1W{pe&T$jhwc;x7O?JM_c@UQXUJtmr7hANpT#3C> zsvO#H_=b^CazjpaR;jjuWHk%!=3=5_N*Vat7Zsb3W^`&U?@dq(SB=aK z3aXjuJ}-LpL5x=4m&WS{)D;?NG+JrH>M&biG&3==f4V|T8ppjHDSqwWAB3blKR-S6R($10+oT>JB~aFcRfNw?PENjHaD=%N z*@-oJdIb3tET(TL6dhV646`7|U~43D0)>k>F>MKt#WgBFVgQ9NhB8Qjdfam&C2j397BF)v6P`4B=~%oNcm#-TPbqZ0VCkF9x|- zB0-V zTEZDiWynnPlZR0;$`b%D>**mq+eJ@3e&wM;G$AQTWr*-TN(P-YcKUN*3tW?^`zJ3A zJzP2cb(Nipc0On}6{yCRdNw9Z)0bymZQa?!LZUNXbuK_` z76LHlNC;$*IqBVIEl;NyvHrm~<>lpJ83rB8)dPC>J_OqeHOB#Sq*2z{-BY_!bCeimFnvPH%gb*B z1qKGjH$I6;f#AKgWDDz(d5I)3EasF|?(En0_HPd0t+Pu?6nmO(7L=A|eJ^z#`=)vj zw$~Mi{l@@ZbgF9Rt%QI}hH%-l(cy?3y;K76SH*9RG3UXFa zl#&L|zm;Ob;r*?TlW|9@dz@Ut|NLyalkxE{S+?Ym7nt9jCHN-$F<>r}UdB z7#z%=FK}sN%rTJ1*7`}R+h{bXNE;g)liO^ML%l;e2zsrJHwHY1Oym#OITZ7_bX0p& zS6eTL2fx0WuCeF~XG@s|h`F0VLH!DV@PW9Ajb{v^IhX{Dwx<5VnWa=V3s(RE7dZiD zTCq}U%GciXdFWdv;rk5Z#7<2=)6Fx>Aev|@n-tbQ4_P`o$Dk7m+b!CB6%&_5;n<`AbEnobb|6x!YD&VI z@TH};;m;>`As++ z{-}xj%{?pQ*8RVF8qfhHgbhHmGs2wLAzc#pap_ZOmzLq)< zew=Occ5id6aO*OSsH10LbxY;w&9sfDozVG}>hyrkjrFt%3HIZCqKDTPt4rPNEG;b) zroz|ho!Fz$`Q@u^e}R9Uey92IzGvf1Z_tM&{QFMJl=BRaG(GI583b+YC%D!fbFab1`{2K|Olh@UaWFCd`SMnOa{l_b`VQio=7@%$?3@%3wm-=KD&l z%`?bRU3Ti;kfnbPB9!ZGm1idYL&K~LjFvRx`bj~V#S4WrtI2mXnhw*`0Y`etT)a|OX>%xBt|EX)V$+p?}!t+)M=aIo#>5fv4s-W}S? 
zA6Yz6EEip)-yT&^P#`xUnAN`8ld3%N%Y`E|?C#Cj{s?QapKCZ`?iTQ({)zdG$|l%v z^ukHyfh4`ni1IV`dOA9JRif}Z;3y3yf8ZVk2iS<^{)z~V1-yMPuZup8&S=|9|e zB~gNUK_(}Z8tm@it{li*QGQOX1eBkEsflf&jeX-jTTRP)Zyvy1-Myku1+zpe^5(O> zdXM$t5m*OklcT$_wwWC+-dd;yJ+i(Nv62Q`rIMW zTLV#b$3BEj9j7=hQrzK{O*yK&1qq3GsF~etj5-=RwHEq(v!^3jLC##wG2rWqQ*|5n zCa5yc8e})}Wg7B@Q>W66Wedcr^w66DG#Au0Gf>PZfHQUh<)J&iiF1GF(X|Is#dc+H zRAgn{(dGAi8rLTeS#mQfTs9^YoEpQgxR((C;8ra$7iehBS2F#5$ay2|>gb_S3}&xX z6M{H==-?l&UXgu8F1~_b$(Wh>v*F34-0<(>nglhuFc!#zgTIO$>sDppte>)aA@zRm zB$|f_D75k6%jLS9>BV+31tR;5uc05J*OdQq7DLpC?%MZB zT}CePVK2S{6R`sxXFi-c%olY|tj+$rMz`EMX{ZWsR67zl?00E$;{d z&6*8ViD!%-bm;Aju#`W7#bVPd;?{aR4LS75$)LiM7ys9`Y3XNsVwj_{|7%v9da95& za}Vzkq**97p6mRhyhD*{4^?N(gEB}nu($tK03lv7BC$qXI$9L4Z>WIJVzZACZr1cb zMEvsQ=2VELFP4hy0rv4&d5PYmmv2V~ETTmIuZrUnjjDKTeLd4h{QeE6`>qF|}Cv^2vO{P%EE6X;G#fyOj0=d}KnxV#(?3p@sd zqN1XwE_BzEKoDz6ye>B_0aVzX;{0bueuB~ZT`{^3%DX{%@;I{d7mm*aA;kO04GhON z0BMov(O&AzevRC&DaiY19tS2qU4S~j7+YmRAhKh$>sx^A=X^Z?>sjs&kk+@C*x zq&HO~xz##Qn7=`xxvUZ}T(M@OUO=*v6J}*_N#JweAY)fk^2PG|TO6&gUtjismINy1 zP+UA*Cmw|Dcv@Cv_$X2N!p7Im=)>>kJ)F?1!DPj69OAIzH%uL=v4UL$Vm+6NZfXes z@#zsG@sPqy*Ll;REJ*8GZ9kO1GVTdr#a!snA)QxKpK@fAI@qlAKzv^5qkBn7NzXR6 z+K3;joNi~l&%*@gs&myt{TtbzAVAA~`*zTH_*ckSi=iNH#RL*Wvk4EO(?R zH6ft}AuJI#+tX-efAZiT!O7h;nr$(H-~#E6qi)}sNqeO}=+N;Ge2d-;xcqrkc_`TF(iH~t3M#wN5FFVDGemMD}R z0psQ*@r;ygx1V1U^ej+=zxcsLyuCnodpT)BgH9|7vjvn*7p|A6Vhf~PhIH4mwvrMs z8%4IrSc1o6y#m$&pO;`%_N8&k9|AhS*0r$fe|`m5W_iB=jItW)y#CAObAjGaCN)SV zPYMAz7-=k(&Aj)?zA=1Ve3M`R1eT9)-n=o)3jfpYnbB1_Go|SaMICg-rV3r+iAMfh zJkJF=1E810jLfFB4GglE2fx%qZJk5459bTS8VF93ccFJ*L)oncy`bGXS_^cfujPRH zjU@m|^LrRo+JGLb4~dsSoPI4^+B~*8`tthG+G*mv3p^0#1*qHwteE6fn?|o~B^s zJ0-3p?;?#+Q&(q#28;(!{MY{RH05TfrxP%mTqkM64S~Ha?z6MRvi1TGP;%-3r#T2r zEne(Jnm@2`?Ok5<_w!r*t5oNw4!tnzy#7pSco(c$yxml4(Be~<(w%dGGXoQFX)b-u zSt5FkJwaW4RP0dQjboM8m>J5LXNKR}SX)%HzvK;ueDUkRug_fUxmbRB>S7u|%YNK4 z!-=c;gZ0Gn4<(wbp%to|d2c*Ao+(wbIjc3Os;=$;FDL)m9NQ_@cHWBA1?dN1+#z@n z7<&?zi||DUBn`rWbtDpGz>V}t;`|}t4J0Ah<;s&maCdmYxz-n8cA0f3TK_QQY}hF- z*PEr?6Lyv4kLzyl>~v}P;$sIfp#O@Vy;-|=z>d{M^GP{Kl!ID>o*O2p!)39^DQ>-6 z*DIV$&N_(5KO2PKEEcsD>PDqKA#$6?MYZ;)_Z!0m1M#f z4CHmIBX8aD_ynqW#SkrmY*c~VQm>IBv(TNifp0XRM1J{!8GXPM75VX>pE5Wv4-a85 zV*l;8Vblq!)DhOd|CVt{I+);AuUUPf_hdDN{Q8>Yy1A@zd*%jq zEVzQ+fhi_1Na{eN{Hs1bKBnP+?!JDJoIG}lAag1O61~~_w}4vt231l!>5dDSBT&ZZ zw3~KzHJ{CoCBJx46tCxQiNbX&rAxxqY2p6WP1&YF-9%@oAx&0ruo>&^J>&M5I0?T0 zOZY%@^~+_jkn_*Nzs-c|K`!D6n!1@DcrX~vKKd1?4?u^mZ(>>k^IJl|LwqvGWQX3x zZF68yEh^2qLi*gFXU@F^aPi?wWtoF~QF>_$d z6Tix}W5+V-I`aA52(9e0vc-yvZRdv8v(SDZQ)CKi_&|1Sf3&0a*dAV}1}OqdVbaME zYw&>5_NqVMnxyECmu^oqvasmO0WuX^KX}5F%mjV3Db!d&MOGf@q4gE$v+Tmx*C1I} zZ}-d2PmlIcN)dz^$9^; zv|63lC^0b0gH{^g3?#RMF8iN8dxl+xzAf782rqK`+P} z)rkb7Sx3Ku0*v(5JFCzj1pGK)Y-vjB1M`-B9nyRP;FHlx`Bz8}s#c8d-|i6>1Rk19 z1~Fb3xoVGEc<}-m6Y^2mA58hvL8s{{koKV+S`cn>r6CCqAb8pdjCf(siNdwf!Mj6$ z9*N8Z)bIwfQx11?ReRyV>j}7Z3B%`wiW7+m0#6UbSZYlPdk>s@CuZFb45i$97jNAa z{VJ&rgi8(7oF$xMtXVoYDFX*LFb^iRJVZ0K=TC1?9Od&bk>)*AUZEgs$~IVJ@a;u6 zV1T=n{hUDlD%_kRr~_OEdSE)}s4zVT%&T=^)8FrYbg2+oHl$}l>=s_~)_vlvq1-A= z2~?^Eb9>-UJK|lpYJ@-*J4(bF(Do``Iz|A~bq+oIa+FUs*(?Y+Oz~eKt0n&E-zhY} z;5J|sb1ioLmT(l9kAe#W5nK$Y{b7<*_jaTiNm>5V=mx_zXqqfLy$H@^Ie<~p5Lju2 zKruQnSgeCEOIm(TSd9YGrn!CYkq~r4C#i`)6T%xxK~3Ac7gY=$HM63!4tLLw1S}#r~_~( zAD_ClLk`)g2{)8F^b1fA;9q*J1P&T{0!OjIV)X6EPIM7r&j2fvb@$}KSAvkkDyI*v zTrN5#N+1vnr7S*7 zn!b8*BtZLuIT-hx1IwrYimwF;Eq$nPg#%1iS2F3Qps$#p11;@yXE1}RRC=pIJA1j0Z3;Z z;)~*TAlSgxqeUJD*6<2)#2_NU4W-;1C#t?s%Oe~!X7`btW=RB zlt>5#$mgHX(qe;d1BeMl`T5`W2WaR21Ol>N(!ep#+zAcfZ{d*|*XV|vOg$jn8{A^- z^T{o44gZ@9OK{gsE31BfDULNM3bLTdeZLEXoq|3Yxc4~dWOH9%Q@;V0FsGPMZEOkj 
z?W9Y`M=OAwUMke#9&6CC;iq9oS9G`x=2O!f1FnrM= z4-&tcC2SFVA_6$i6;hy2L=30FA{E!U-|Coa?`E{n-?J88{u6m(ktAwi_0go(WuJe9 z?0!;^G-f}^r_fG$`Sg8(V(Q$hFa2qR8%0%ALc-8!^68Vk)K^A@RSe|os%%G5OTX^j zLr=397H3!S<6=y~V5F4iSC*K_{mPpQNn_R1?;od0@8&p)@%E@jXjHIt&BSQ@>d z%hbH?jZD`JED-yU2HO~6a@v}^&mGO5aXYZSEs~#3r(=nYjg6|aS5|%yVB5ZMz~C?J_{cx{0}>tOo0qLATkqMbh$wj6xl^=9OYp#fSnt_ym-Sy< zA|6y<-x%X>^BvodUBV~3j*0|jE`DWl9u~09!K|oMYAL@Kzo|ATcz1vy4%m2QtT_@e ziS1{0e?S(5u5`XUrvz-I?3sy|PwvEFIroO_ zJ;vXAhh=+HsR|vTaO(w&!CKf&%R!~5U#|8njWO7p{6O7D-gcLM^AVT_{G=moU?w?z z9yflmgY-@G*qjy zf8K^Q*$&oLc{?2lI%@0~H{l|J)V&=OVyr)-?;;(!7p8i`uL}qYihnRz;KA*J-ZC4! zw^nWe?SlJ)X?Fu$Umr7`o~Hk>cVOV)J}n*#Ck=Vbtp~^K_-~z!TnNo%ULOi+)uIGa zy{TiTPdGhMl3%WPf6l@SNY3{Q#&*3H{_t|Kt7+%&3;VY@0ATDFk7H^2 z`UCuj)uAXfFxV01vwGQj;R5~3qhcta6lb;bPc+(fV0lsp`ufUbX0=r!z`bcxtEwxj ztD6o=B){78oxgyE27qDNvy=>;`XNw);@(#X^nEFpXDGsCx7;`fcx&x#`tcKQ%A(N` zZ8d;GjX^`&;g$#IK%fwzL%=+e8Z!C=eP00u=n9xqEDBPgjid|?Dj177I65|zGd2ec zZS}%`fB!f#He6Cz=x`lK4aJP}(Lt0_r>6Yc+7JUv>=vHhkr8dE3vvLTP4JJF2L!vJ zw$y(|m>yASt$!swa~KS5Z`Ovc%tQ&cb!O_Y?SLv&)fiE|8*n8ek1+45^|h3PA&lOWFE%h61VV)V zMdrktqtT~@JdPGZkIhtVLz+N!!B}2sm&i z=H<(mZB1Oxf>8I%bMy<8IZ%_Sp_&oL=rU)4`|kRV@&>)=C31;lU3T^Mh)_0X@2`ku z0xpC(3q+sNImCte5^!C7P0c++ig)y?ZINABL-n18QHj@F_VsJm-n#?bd^B3kzoKk_ zOqtM_E(jI*aH?l1_n!>+qB8HW>L)ky3!GLlT=M%RyvTtA)GF-0@1$e9l2z5zbVY(> zcvFI0n?WBCo#(^4aj|18Z_~^;6^wNlxLQ#V?%91T;two_0JOub=CRdi(Kn4o& zK09ydjAdqp$=#6|HW#x3!)8cSqTcVuri_Kw0IyjR(2-{$BV&2V%d_Xs#WPnI8X`C> zQ^y4>bEa%xVFe?k{a#GpkU!bp-PbNuDMoogAmMXC=v8a34oE^HEsP*czJquQWT4uw zpFgjvw!RQSioj~7qy-W2y@884qVRyfOxG&VVe&0~vdi|r+fdd>J z>tEh`#{kU_yaVR%!_>{ZtO(6ua7euM@d2H<3de#0}i=h;1^-=W^U~4zJM@nb>lpeNxMzylnm6neyC~fv(jNO zy?1m6Z#Fxfm(@KtoeE~TI@%akfP$PHRV|fmWo=FH18*MsE6J9nj#k&a)9o6wI{PV=VvM5C>!Ra_71sw3!v>QrWnIHQ>ntmzRjhV~}@`6|b0 zq0P0>CZS(v=FocR`bD)3Dj4Z1mV2`+y{U7+f_SdEyIBDEOYM>2DS=6KG?FF z;Y9j*e?F=D!v1D_jOT@Gphvbbc!oaiY) z5UF9*lo~9*&T_94Jx;8holw*LN))f{CmV|SZqjl87z*OffY!6I~AkWbI)SdVh0BPKocUy1e z&wvKva&{UJ3`Dlw7s6gme_IfCedTg-aj_6UX~;!+`S#;k6VL?4{OU-dDBi5U#|0Y1 zS0Y!z$II)f&$sfocNWJ#v+Lq<$KLz|eZ+=%B$~&FF*UVzOe^#&s$xt(=(=9EirP88ud2*^RMFn|yBrWVj=kj>uc_h4v<9iuh|H?c?KVITU< z^+p#?=(g8LL^GsHsEN=@VGiwOv(*`j7E*J%QSQOf9LHw`e={Se;A!_(S)qeZ1rO}` zAP#$7;1uA{Xo^@;7RJyF;U&Wt&|LbVBBj zS*X<5gU{XFSr}`u4d@{cUJ&9FnSXGJlR=^Swl_*VY*$H5=ZN=HE*Un!)Ac~y$9lJ> z3d0uv2VZX<4rTj?jo*W)WSJC_rACWNmMHsDrerCpCX_W-oON`~LHD9M2!m5i|FFU)S|n&d+(Cw`>TXAqH4j@5wd{fA3&l z4;9*Yw!c31aM|l=FeyAgy^F#i63zvs9>$fJ^S> zO=_8eHcWOTN|30iC^Hzrzf1#wCKUbDgZ1C^f7I6I0r+8^k&>blAwb#>p-l(Ur$1iU zzv;&r?i2?Fe@j96DuMg^Vi>hFNYxGofFB)88K*~BLl*_Df_Vu&dyw<2>yA+G-GhYn zhtn8JRs)((Op2vsUWRMC`VY>2jpefSt4H_MqZ3v%<>>05=LrU6(KjIRTfY!{Xc)Ay*2}rBgaFtcMH)Bj-IFXBG;J&bUP|iq65(IjTi#t;S#pRS(> z>Z|=Ou>nx_G7uJ1abadbqR^!onI#OL5D6XlLI2wggj;kE#pTJ_4nwql>#2dVMiCVsW*Ak5#PX zpAbx+khcu#*k$NP^5Rm|K~t)L*JOdz)k;rx8}$W2Pyuvh!Ib<`zb!|=ffE6i-M&yt z7{FRq#qNG+RM*zt7JVy#Mz5{lqVk;2kxjYPD6>1JjiaO zIE`!&`0egi|JtUESKpS4we?iWr)=hGsCqJJR^wUC4)fLqiZ|X6&DD<)C6P2K((_GV9oEYFwzj?i6Feb z>$4^$JU5^W-HSl(SD)kN1r0l!4^KkGq1{(A%9Omf)_3g@)Rf&a6qy^p$Om zJx@Fa2toZB!%@HD%5dQTmoTG^)y1g2B`WBnFBGx>s?xv01a^t0!Kqv{S{0!bpuK?y z(cw5vxpSLd9~r7%msk3zMrU!iEircEj>3Wi^uPiXP2n{P(*xJ;)>SVA|s}gQ*Y2<2Uz7qJ@Pj*#Cy^*UD zX~gIFZf*7<%O{9pg0&}iNij2p4M}DPiMr51?ta9-c8b z039lT%Eyz~P&O5S6$Ea-=l{w^&sD{2v~hsq^7Dig-e_;A%+D30=_|oQ#fP^akP!NS zAP7Mvm(YrK1Jm-q3Vq(hJH#BD2Cx4(wd zFt1%tN=h8h`)oP))@3e1u)2=84IS*0 zG>RL7PF{iG5L@|{qaO7;>_oe$Ap?bmi|JLRYQiPrU+tq=4j39U7kAj%S~K6fyHR=u-MnbwQ5{g}x!SXb>XW8lvN|xWR99%Gu}{>~ zLID~8QPE#T$!lW2cF-3JG5?H08>8uqt#9S4%&o+#ifQ`m0Ol3Do~cam1)m1t%W7(C zOvF8e9Pd6e;Y3ydiIQSKoJu0=$CCrS`?gftUyshzM@h__sm-!s4Sk91Es4KPSS&)O 
zdB(RAmmgj@h**I~Nw=W7#ILsF8#$uC%BTTt?6lus0LszBv2mcrnT1z2vdC?Aa6|4i zXL$Z5n})1th@8azG}McLJiX&*XK%0kP)c&FG_T7s9By~@g}ehX<20LuCyKg6A&A%HwB>$m>Ys)(&488q^p66oW#P-c6EO=VRWKV zADvhinyPkZply=eB0G;|h^Xot1kneM$F3D_U?u*hjZ7f3WPLhN3dxHZxq}cH8K0eo zj_QR~*Cv3@$VGSQSxyPa7eF(&Ah+v7)ualQT1>dU`zxXzlC5mnr%yHY&jSk!U+_ zq9UccABI<{sOm-C>t|QXxrEFL`)Jn>b{+%kA!T;Yy9Z93Fq-DyDxj zyD`>o{vZhYg=sF|X#EIP4o~R}-~Lc#%sUXFH>1fQc<7x>uw#tq$gbL~*d2dQII*9T zLnLx@a!lM~sk8Uk&sAqv3Oq}ncVxLVSd8M9EB8jm3om&1=Dqz~cJF%_pG!SUv3%q5 zH4ZHtm&&Yr4i%A1E-BeCxDX7d@34{m3zER%ky~nU*BWQ5IB5-~h;4$kt9RSoM{QFq z?i#&43yPxqZ~b2@f_p_fOb-|z_U|Bkr2p%S6w@09l7TBg0TE#ze&SD096mH-Vvs1; z0p=a8a!eSF=r-q*Q3IFN%UZflj($hm;v;YvGtuBVSPIK@5#7L!qKCP@PLq631LH$Z z?AH;wFg@9r?B=^gpD0UiFe7#jy7hvgk7X>S8xgY_;DDK%%@S%4hABE7`t_FoH2lmwRi!4 zuDWclp0cy1zCOk%sNe=6>f*g*lJ2hlccgAxo!_8Zwd$jmt-w4lcBjmQ+ja{F#a0za z$xKT2OQKkd9GYtH2G@gN5;a?hINC7dNr0Z8@|M<4)vDasf_m@%`B zNXhSfC``+WEtr6S#C0Q;yJZ5T3wcQK?tXwk>vqd%oD>igR2Qq#H4aJvJB78=#52j2mm;suc?_?I$^i` zh=FA!0t!B;c)FO8yl#iuPwciQPoDHZ!5D6c9}B=xZ{n^R28$7vtWvUb#{8p!ep_t zq5**3MB|Eu@x!<`iZqD1t*yoi#SNPYm=FKv1`mW2?+6p?OJ*~JFF_-R1<)QaJITGz zE@k9euZd!%(#Uylvp=nzLgMy3?1xN#E97N`>B)|Kdt-NR?~OV*#x|p+7H+^*3PmHo z4e%qfKnAU9Vq&rjK@l|I<;G3){zef;^q0|KO@pA|V&MP5x+Ih-z;e8W0&hAn;^z2O$e|c>km!g25$FeF zwka7ITYd24k96f`a6mi299UxoU^xW#4Y8`p9nUk!mR@~j3X`(NJ?ks;!jq$;+Wa^^ zT00-Fc@?a?0FdRo+S|1`OiN4OsW#Ssv9Pg;%>)dk2+WYaQ_vv(@_Wm)ft^N zqHvNc6D|k%1(B*4`>1@m)p$J@Pe%Z+#rjmvtF0-UQ)#W_0xO7-d=I#j&0b zWHCe*vZyb#v-Cn@V%c2f21tAGkl;?4M(wbuD9;C?rW*rapM=CaPUll;B$@hUaC2AN zoE@RG=Ykk^J=dnk!x8qN3aEK!;r_2i_U49jeQt?C4&mZJQ+Lmd7vdUGV+pd>r6Iio zHwq}GHa63*A(hY1@+oAH&wqGeehnsM8V8JXG0SD=S-a`_Um2P0enHZ%igZmKX9Bou z=F3DzV=U&=QGR}7Wk{l#to+S%&$XQdjL_=N$mB3rFu+R|UAb+vk${-Z9Y*?gZ_l98 z!lzr6@Z#rpAR=I=6z+}sn*G7enG!bqpsD6@;pLLWt{fvO;-3PW$PR`&6^30d5n$o* z$$CwSuwjd6VMedD&ZLa*kEPTD^O6m2nQJd2w+RHEvHRbg6J{?EXbBs85mL=ZTv%2H z1i%k$+a*P8_;i8JwQF|iq;&(Q-o`tyCzXZ@r;Uv3Y>=h?Lv@(jUTz>)1JAeF* z(3%y-nsdU)!$UjUEghqahQri&WxST{#E5ExB?nynVotoqgE5&FdHfmD%Wz1pbb^(5 znq1gJ-B9p*^_(Q&tP`<#ny^7bL^&KjNFU4g5(R~O4yobsF<+a-&YZcPPBDcc6NBMy z;yA46h7slMKuUUgFeaY^!4D@rA3AacrX^84)hlV%U1fw;J)}3JVe3^Zxv-tOkq15z z955%u^H)3fxDYlZkWYdWxo`xrO*Q{Y6@lj-auTS69&!s~&hXdo`Ovj6$i00m{h!I3 zFHp3Na^NI7Q{XsgOooOgim+IVn9@k5k>jRDNAm&g-d{Si^;n5NfOM%kfbDp0KusW7 zx{-6f^xwB!PPxL-^?!T;Buepkx+zqH7G zvIpc6`cuFwyiygZ(UrS#5W6BuocO6T(G7pJw~s2k=)HA{fth>RxLqfJbnFe7-sE8N zPi^uma2~~}YHlwR?+%1;<@KPczP|pxn2U*RHy4*a&0lj!86R2B<8$ObUefrenz*d{ zX68(lY-B{l?>+se5Fu!ud46?)uw9EQl zy~B@0x*kHK2z|j1FXDc>FRy;CJm~gaE%gTC3Q{Zhfx z9CG%s>CE-sL41$sH`LTDtHOT1N4lIq9f`c7Eg&px8#*M}ccYbqu$b;ibm*>k?9)};Ef(WvW(}e|YCj<4noK|E(fg?BrnnTDm?0Xq@2muFB zq-_a^URzA%3Jp;bw$nf$i(?jRgds$OrLC>O3EX?c^qYh$sU{J^6J&v!yT`CilJ+q3 z(Bi4?ctn z0bk0f2sT>gB@)j>XT+<#k`oh$E@D#@&5W{j@V~3?W(ES?Xk=}3NYH98_L(q5K?AK; zW1NGZ=t#^qd^-GZPrs{tcma1epoV9bZ$=WvFsX0p6R;cx+Ltk2sB~o9TrUqueY(~5 zHs`8jnffV+d{9HvX@$L^)YVdjug*PPARyUlHaCHgG7LgR+x?*#Zym*JtpBg?_(mep zz|eSTRZ1Rg%{a>y5V!P?AFb73U525C-A6P4rD_BKKY{1?w@GgXa?x{&Qg{i}a za$u|=mqre__W}n?zDoZMl!u3RKJ-LBBl)lmcKrJy&Nf@G0Mh8p(cPj$kRm{8eFWQq zAmqZgDJf%4HKFF=K?@ZZP?kVfuEPDG6#^@r2F(V7ZEJoiwF`$1-I$-je_ zb>m2+ik9~Fk7d+k@73$ zh75$DbC{ZXa1-(bje&4j6U=-VFAme!WD;!Qr^NfDd`j z*~zJUPrqF7P&>dA+nagdKpuKGAGC%%8tQ>XE{2U`R3q39J?{^A54ZD~ERO&hy(g6l zq?Olj>N$;>NtbzudyxYOFGr{))I2VsGiwvt?MHBLBz;zD4K{a_n>sD^4tGOzJ3-uT z62y9+yib1rzGqMWX=EJ0Oe0v0g_Q9UP%m#ryR)EOwP$VqE z(Dj-m7jOp{`t6Ye5hmVNhbY4oiUzRlj#b-pp|gJrAc;)U*iAuMo+Z~WVsrqJlnuo^xVX0Y1rV`d z*ODno+l{+0LQ|UsiR&SlCspN6QEoaXiH`IjylRu4==A(I1ONrPepu?s#$8}B6>_(VRhJ_U;)ccff_Hh+J)vQjXV&r0Iyy~}&Cnp-!Wb%h|RO#Cc(x~K$J 
z8_ytb*?sf0=smc#c?_}Ru0B`Q?FsUiSBMuIk*PlfH@6>KP&Z$E%Db;`tWXHeS-*s_ zE4zs;h`eD?!4VV*6c^uK15WHN}Q0J&peOkMeI}webzuB zAwXA;NV1h{VA8gyvd%q?AV`q>^$9*^8iqG}3>l=A#T|a_gsAn+XbcXZnJlx~7LMqI zP+@M)yIW?DA2=s91mbZLG$ayfM?W93zNc4lEZh8rI+hOYDTHYY<8o( zC$5r&$A{kq?LRfzx~q>Z^d;;g_HvjW!Xteo7VEJH3A>zb`=EYvEC#2W3zWXZ3>2w; z4_uQ`=oM{(RvMuRc!eMK!MFIt zg%=hv+I|+W0TR7Q_d@q)>oq>iK8*Qy$S@A**^vx(^cPL41Ia~a%LoVJ@mJMHcxI90i!r2Jj|Y6zZC zb{q1X;G~E+bZK)A+|?^@$fb0K2Z>$;Ip+Ai<+|W?3Zt;1C(Jw+YSArCO|yE)Y20&hz>)GHTbrzXDSS%Aa+E<^rM0m8^Cc425MxYs_Vm-o^1bd+MZWuyF z1EPBZ$VIkYOE1o|%DyMZ(LLYQuRn=y|@&IYJ zhbG>IRMUm|KQk5gUKuDpw`VGwoljY=Xd!$Jjg4{V1_uI2<`ugh_A?`7vP36@rtM;> z{8PRPKHP%_+4<10PQDsK)_LSrU`;466Z_B#DHh1`-@)gQ)7VD{9bB-7IwHRUE~@}D zbER~quNdI`WQLaXnKhV+nITEJkn^4J6>Qk1$B;AuJ~&Ji!*uW6UiQfU8m{~66Ooez zW0F^;iCvgOJ0CK^)FT;AvI@+!1yO?ABGM3O>vN#o6_1w3zGu5FNj0WOAOMSH^4&{t zFbV;|V#XGz;^j4=w&$SG9mjF4ZLhm1nT$IV!Lje;4iPrR8)#CgU{+Fc1f=X`rmNtdpw-WY3@6OAm>pJ3@P%RP_odyn3cxOIpcS zqLvcKJ{;xyk=qzAB|>5><&S4<-s#tVR}eeBJU#q010sZOWB!YTvYm<-hhG30%^wAC zR?m%~0j=rN31ViaEm{G*89$M(WGhuB|RRnyeZj>NCH2sr@cFG?>7i9e4Hb!(@vzz-y z<1Fa6SER_RClCvr-xMe~X^n#lDmmw00<6w~$nK11E#HMuZ%3Gb>)_1V=0;-9;0Cw{ z=_7ypkYkW~(+)hdgiEeWB)cL+QaIX7Gs&=Ev5P`XNUv*kcA%k7AidWu*2$G7>+Ge4 z+^DHcx!mfdc73<0!%&-R$Vq&ylQ=EoJNv!QlY+Fj{`~pVILdf{TD`PcINEaNwfJH- zYXELj`>DFr4E07s+)S+K#CX!@pt=?$-KX!BkK>{_g)Ow6T&UiB%zKV36L&=`^YfEz zhHm|=`OK2GD+P7~`;Qy62c7^$+d`@E#)xg)iz-EV^b&ZxChAve`6~G=f5gqWEwb3g zNXsSDls_{*!PjN%X%#XBf8RBsd+oz~^)j zRu%52)n_I}uVwom_nl7HPFtvJ zG5RxcKbuU3bg?h=DVv6Zo11FHd`xC|$W%%CP^Fp{qW#N$@Vy-3v*cR+BpMzZ`o*hZ zvq`>+!*voqf=lCZLW)2Fz4CuO+J+^(vmt*05KFv3m2sG$mrkyQ0Or%~9whn-k=&^$ zSo`DtKVH~2;EJ*y=}~Iq937JJwB`}c!2oK+Qr4lxR4A~!b8~Y!F!vC*D4>`z$*^t6 z8P|Cr|AslnD1C+gw6O4u9E4{2{!x_X2^F;5ezaIwz4J)m3G86E_*spQf|eqD?$5Zf z1w4v{WE(RhF1ewDdX zd0_1G?f38B6wErJh$Y@YaxvQA@d+lkWts*CeOwkl&u0l*iXo)SDlkwUMkBbU#qNjw z!OMAZ4KRb>XTb?~BC}nx#8!IozvYm)IDFMIafiS zPJ8yUG_6~>KUrqnh~t^z~g&qCXRk0pmSzG*$rms{yl&<(UuT5`zY|yJZ-$*!RZ>t;-C#tf7q{luq)Cfk>=hy$!Kw zwj#iU+g(H%d4-TjH`nK0m94AQ=v=>TZ*LEQN*`RG0n^4}b&|;(q?#6-gQKVC+K$qh zSK{Z+{q~DF+X2_WT5gZbgK_Y`)d3BPJn<1(IuWW1{zp=6xl`58M5lLN`+sF2iI} zH>Gg;#o?-h`_m^%O@lfx@#=W6f&Rt|q=d1XXJMybC2R;0?*s-0mg(u~@l4FfLTkto z>V+uqt%SowOu5Y?X-DmCIz#XEQ&3)u0*Ng23%4*%>GrkJW;Nk@&bnD|U4xrvMV%%Z zU|g&f)+FS-7kdib+~CFB(%c;UVf+#dn&L3mVu57vfa2b!@wmeDp@~i(9qWg229VzY z%!2k};$X0H61&1gRAT{r^TB3tp7mNHc@=}0(JuF4SpcjIbxJXF(3ZwFak?)59pDKb zlC=u@&Q~)Fy_4Jdw@@I68><(HjDi~0t$2F>SJ}hc-6Nj3Iy63P@@QwSI;)iPftuOLp#1v z+#k!-Z|cN6AV+U#0kuHzfT=V0aXo@<$X0$PG=K|sUC}l8$}DToUBrX<;S!{IsL1Cd znfg9Cd8Xeh$v5Xf`CS4uN~KBU&Y1&fS&Y6BV&-#ht`DXhyry#y-fjRzhw)@g^ff{)CoOg*o19A{$)I?O*m%HLo*|w#MkzrbBXSXcrqJ194OqU^CA$t6 zB+ik$nx|&{84NJqk_|{(ZeU5$k=zzQ;xtab*?RD&)Ud4F=8V(Ma&|(`^x9_J1tux~ zZ+%uam`{gGKe3`kiZ~9g+2!87d)@T?4#st@2ro#t2;O-78KgL~>jca}?y}aYoe%pKTCa(7$;tG-xQXe0Q};aOG9Gfn zx2nkTrKP2j+LllrCf;?U0glY-@15jt5?@v*&Jf z)@x~bfSWsjP(F-@KvKTHH|tp>THn5ih;Vs1QJ8vZ4LAj5n)qAWn0DCmNDD6O27Vg2 zC%f3io+v}7rp;`Ka-Vomoa2!-|EW`{eZG&-!7T6{wN(&{kfDO zew3O7t?cnP=$2(Ngc^0tV0<*Ds7)4qj zeZXBsDLyx}wDM5RfF^JX{m}^;r|`E5ki{=USJN~b1~c%_?YUEU95yvEQK?C){`(zh z$uA;8*IG?r_@awsq>MW`3H^{fggZ0#QcK^c5dsIc0y18cBy`SC;Fn`DOZ!GcN!p+b zOo;n>)kWqwTlvk}EH6kHU`8(XQ>twrQ4A)~6!+~Po60%upy_1EG&{2-4J32#xv2qqrjhKW> zmj^EbMT~2a_sb*DnxkqLY?Jm8T-2jOk;Xc|?;{yBl4A0|yv`t%TUK#0?Jk6~>~YC2 zVEl|w&?PYgjEc%|{;#Aoqlwb&Q*y+j>vK|Z=hqPuIX0F2CwOQQ-ehqXvHV$pTjTJM z^r;i6TBj?&iDYh$u<`(HFaz?WCwVo2+`C`cYLmI#8aHBrW{j~_ z?E^_NH8(ULHDm}ywaBaGv@E(efGkI|JwHbU9v(;1m0{aA&KJvKJ1~k!bV$QDPR}T1 zM-dN?D;M#2mAi*`E+OrlTdIJ>DZ&s(H@ES_XbJE^g*y>B zctS|3#htJ>)>p2}Kv4-Vwz$7BnyUk4$%M(l?b_sUHPj{;^2d);P&d)InLJO<{V@Im 
z!|Rz4w_)~_gh;+6!}x(B12{a>nMM+-Zr=|aM3K{_DmNFS?|laFDjCG2fQ$>%A(oIIp<4_$0(V-P){D+WtRB z#9s%UgQ7ApeNlZoh+l;<5=7%aWNBw&vH+1bmcrz8c7F3|)UMaSbkM@MC0lpBxOlG? zd=7JAcujSswKB#J|2MMUVL+t@RxO!|iT$ zNk+w7Qt>WD_d9pGwm6NYgG3t-8uP;Nq1rzBV^t*Jzy3v?Yee-}*%cka)R=A0(MQ&3 zWj6%}27oSK6~d_|opvuwxj!_P2f)Ya4`};Q;t_e32?4#C_#n!J21=LPfL~ytjX{~u zMiR==M8hu7w~ubiXg;F+1$023@M>5w<7_wt{-UjJ?}2yQtD|3#zV?pd{u(!T$iJ6% zzVUk|`LP&m(=rH?Y08U!O09U?UZriHcJF;gYYoM77ycf9*xGsRgG4q|ZNM$< zB%m{|y$Fqg0TS_+;)eu##6yet3-z-sNII`L?BtZp0WF zS9W%wmW-XMz5MxgT5b{eUNd~cl+&Y@(=Oc5-?2Ws!~rAFzR~Efo+2fZGa!oY`^F96 zn0dP+lsnOYzVc!0vNDWsp0ySMtOll+@5nBDPIdR;f3_f{{qDU7mEV4H=O^M8vMN%@ zu>e8+w6t%@o${-Sw2m+-fw7j7nbG`F!bSqA4z$80@HPyRUw!7s+}x#oL~BHZi;;10 zcyn!LOABd7c^bBU7m)lv5qHkflq<2w{hINL;BOi+|9k~6-V3PkG|anC68Q{?C6Yt@ z(wmYAUw}m*xm`aLc&{yOV9F)%@2Y8%$)$I7exFt{UpJe$m(BwpZLibW)M|@zA9128D>GD!M0^02-+-26qW$%E~W^R_bhhuV@Ut9iMxcIiG59v_I`4LjL6dC$Up!b-@bi2aNf9%g6Q)0y(4?T47^9l zYVIuZ=p;?bcq-089UoDUKEX0usB||dg-%3`z)vgtCkFgE$0xN822@mlCw)s6F@P-=L?_|BQ_7N(Ix#KZM`RkHyc1n*o8C=)S^ztJUUN`Z$uIm=I`!hLuJ@5n?q|6H{%ZJ!9iMiW>lm3G z96^uNjS++@K=V+_IswQ*J*@q92ziTVVof zyC0AcE@JR&a&I|6fq-YFBjG)G|9m8~V!@rlj2qtdLW9I{jpz6ef#KcPYmE6&ai&R> zj9t+JuP8OGqf+cly+z1+%jL8B1_oPYU`&QAE5h^$TW7LhD8%`DCF9#&b93v9 z$}E-{?{wH3)YOz3ex;NbP?#sAzh-V#?&7w!t-huuM?>6GTd~%+>7pxZqT1}xRNk{# z$rfPC60(p3)EjT4yc%-zXray8wJvGc188o@p_7pyy9 zhJ(1tcfgNBsBpS)qVy%oc5P~i0CgPPu`1FZamS{ky)WlUmnoWkfVJ)jQ}T?M_jiYm0Sa?Vf-8Kkbe}>~8NbQ}E_;J+1fj{(%E3 z-!DC4^F1s#s1yg2+5*v}U5dfvQa(k3LA9BQ36pt42b$!;BB3WAl|FcMmVq}O0YOEu z*MVaBHX-4zl14S*)~$#=P6Eqzox0#R(})sKQCSyQW^^&HPtjc94}B%@dihNd$$U8I z_r#fmdxH-z!0;n6=(9N8d9$%=coC`7OtnHK`&jh7ok#uZ&9>$^ZpzK?(Vw{P#uKc~ zYUs$H+!MVMf6sBf%jmRVIC>ILK^p4&H~HEuRw^&A!nET5;S>U4&{t0zhjNSm;}x3P zDwV(J8QEsHZ?g{LiehEL+yM}T{-R>bY-V#PfjqzQpsZmaIU(rH_3N4z5t#dRd+2M} z!^iTAl3orhS#=y<>e4uSzQta!*irX@0j1|A2 zwErr3%UN)7u};pgXtMrn{_SsSxpgePE$?%DS}uFW=%SnG;V<7BiQ(75sUf-5B2P7J z;d5MEp98X$-~@;`{BX&lg+@)fgxvBQ(*gcVhrB9xnpcOEjc4+WPuMRtAB`^YI?&hl zd(bm*2Sv!>(v=%*4>#5)+2vC+biarq8T;4=zb(5-$gw#{sDx!<*15`GG&MDCS)7X! zK7~TN@I(IupfcQ?RGZ)9{=(<1<{oa7PikLjc4)54nq1PU3_?`Zs+dZ9^B7Pg-vynsjNuY2JTj#bOIO zo%t{I7h?J2!MrY&SEf&MA+5Mu{&~jRoc&+dSLEV__ghx-0E$D>W&1Y&$NinpSwFmFz97`r-kyJ2K)_s%cpqHr#lXIa<>Y_&vs#@S zd*bAq$M7MSF{arCI$`hX5yPC@uTG((J^1-|!#J+6BPt#YiU)~1EOI=en?bDxKj!@? 
zlz~e)obvA7uqRT4p~Gk=SaEIN7uTNcYcsFedr`D#XsPYtp-%?ruD}nQv!&M^tW~fw!$3djH?YY@zdJ;B0^cbpUU<5i)c^ zNr^+?zelsWy%%r^F18q5?d=Cr6ZP-+>EZ#e6-ERf9y;=&b^N|6AlK+clxA*!8eO^7 zIRpB?@-T#@3+{yn2pfuK`>wIh&ySoqNTlnZ&Ck!j_P>jm*?!8nalNM6Y!H>iJ(0lnd zE9?4bhR(tI7LOc5mzrhR&ktEYdGzQvjV6&_$&VjDez-W% zCae!;%j`%Rw39EJK~M@$Xz{=2_@K#reILw+u^@+BE@*cD;ETQq<`@EAwe>foVX6a% zkQc>q@p!rCM^yIG4e%hW7wxxyb2w#`J-$pnImx`N3x(G-{Q|<_ z(jCEwCPoe+FINx%1iq|8clU7Wf4A!=@U#Otw6h`s;0D9@^Vb>|JiM>N8-XGq_nt-m z*6@c2oKrmmRMHekWv&>a-}ZI4&Bd+F%^o4$-JACNVX(V&K^l1DwY^)DZ%%n*Q9Py02d%dWzj;HTP}B9Z{J;)Z5I(xxhf@rgY0Skd>Zj z_e_J*-~!~?9uS#%UCL-XULPyhoYKyzwOEs1=nsaFm!f1 zSgsn1aHhw5OBWv6NuzA*5fA@17akr?h1X6d{)VS2$X+`NQm@m~(muc^Oc)&<4TJyo zBH&J|^hQ5J!=Y^!eHQ*FA->7B|4I{7um5pfnqI2E{QC=``>(bnxQED8Kd&f(r?#}P zdsZ*1tJev9xub9ZCt<*L#Xp~mPUJA`Ucr@z_CZ)%Tm z{$lFm;7S8+sV(9~f2gvtapna2j-59t|N0w=KUXO^x6 za93MGtS+dzm8~Kja1K(#oCMNBAxBScGXX%bT<4HAiz|DzIM(vdf=Kr_2C&a}Uj1e=8dO$!xffgLM zZ1B1$W=5oAY?7xo<`zZ3Q`M8(eWFe?0yw)q*M!o4oci#>_JgI>q@Y!L301#%t=%~C ztcmI?n0^E??Rqd$b#QjPyFlC1b}b{&w=bK)`PnnoN~kyHwwpSjjI;oBe+OfQ06k{d z9krWBUxMNbH|X_Jl>jf%E&aVT3zh^kbVLw1E2ZEsI!(4V<6FzkP&Ny+{H_S zl<3EO2JNI-bjP_9W~pC(>a<5II9rr0y;g8O$r}>vpF-ZfR=V-~+b-CB{_I!Pj?Z16 z+wnW(-TSv`g*ReX3UpvQ^4_Rn0P`Gm^aO3z@!3gXXWdkq`i6H*k->WJQlD~jbNY{)x|$Dc z$-r(=ahH|m5|!vC=119HGPnEGLlM75>&{ZNwS{yVP$Cc7jLvzyBxntNVUaJN7c=b- zXm95Yv6S`Z3e2;uzQZC*tPEiD>>hbEc(>T)Z$B|<@_YAF1oq7l?lD@Xj_9co)Rs(_&(di(r^9KOGxTtmf=3m&N42B!5GJF)Hi$=iA&9%O$Cw zA1q-o)s1?Oq>|F$#X8$2eZAa7C4f6$B` zH_ihIK))fEh~nSG zxwXilF2iz%kL^E98aUy<*++w(5d_$}-oO9qzC(>A=)&btx2 zzr){o6-d9%o*RS@f`6QFHJWcN@$@Xd6uKTO&&%_S!?a;i>rM~T@CwU4-||^%$=ZCx z7GHX29?KnGpZqq;pV3B4j@WDUm$yH>q&u%oe@JOe{g?hvP^C+!wT!udw{xkH zZ4nwILA0smyU?Vp0T$cp*HUhtot>SHLthtViR0&Ik;%^Ag#a^q3`j53$oi2;^u(9K z+>arqL%!hE*us6J+}L7~NG*X#a(&>DfD-ZOput5l{I1A&6)~SRcvFZK#Gn4koNE>9yuY zPxwkp)AK)iILTgt{t`29n%jwta~%xz?nhwF+~#YAGa&?9g2(&(L8C5PE0*w= zdITqjOGyuk8|EayJ@JP3LU)1CYa4F4!dlDIlun3`#m(1?3g}bwsMmz*QodF^=i+#h zGB6M`^ELE`s+Hi$@yXBMGZrBtL@rG|nq9bWZc{u<9{SE_4qqtevs21`;DVXVM&?ir z*6M6j#;OaiIbY8#rQ!nopQ!O*Wy4(y-`-bW?IH_wL=SWFQ+ZWx@`{U&)vElvoYo2Kj zdTlKmW94Ww9)~KQOiiap@OrK>6)N;*|E<)L%pdvov=cX^8$BKP($)&}M}i$=#nw_A z#yObLBSTg5uRxCsANH0dh8P4T|J<3myeGNeVm{0c&FVUwLHe`Vju%?_0%(@~sKBPT zSm^lF&wj(YIO+Y3jgc6{C1Y34)#M35LqB>3SDy(TSS+*oxp>(2Q7m#!trCAVhpLBK z3miqwnvXyvI@Iv{cc=GNjmjz)m-Gvd9%c47$Y4JE&abQo9f*XeO&Mwn;c8uRGpgQs z{dfR-`UXO?Z_|?_^;>CO1Z+QiG5BvVNx8c5Mv1tY#$|KQ3n!GtcR20lRrRACRztt(8fdz@V*e1c}@u7BS8-#y1+&T7;t`p2M6BMG)J#j87Mv}ST zBkR(;oL#zohr|6`G=*=)T~K_(z5_t5S2j++B5aUabY5I*A}D z_VzBxpJnZ;d60DZF;}p}`Fx>?+DD7Ts$HF4ER-$#1+V2AOHZcU>oQV}mF<`w>WRxZ zC5Bltpp+I%y9x!rd%w6kQ1iO2bF!h=Q$S(MSvsa?i_ci;!Y5e2tPwl{nha_Nbc*u%gSyv|C!(;w6%5ByB#^rl+h5=m9cd1k@sd#ho?7>d*7dw z=jhett+?O38A3&$P9c>3Q^9x8{*%}C69kP0y@pZOrB;Rsp8zh`nEB`q1Ls?1cJUho z+nHB;{2KS*@5Ncmy=Z9;DAx52JfCGN{q(|(^pfAx=iYw#QgSu$Qyb=_lp{TozM7OR zvoc$8BiSSRcUPMuSB?2DR}+Vb?3^5dGB@njhqOv9AwiK2Po_UxeBM{`Uhw64mkg(j z9T(CaJ#%&gwleI@iQ^CZ2(`!e0319+w`#CraELQ~IR%0s_8(bsZEU&IRU0#TO_%0|Sr7b*1Ol8SaYA6v^(-%8S$U{HCX|CH40f zyp$xzB4<_UJnQw3w{bVM$F^ zj19ZUhc;Hb$L^o~_UHC#I9Jt=MUCw?K4@-C8@fM)^ARa_e=o#A?bt{Xgox<$6bz!y zE5VG2Y$QcNg6;qHEs*oz-iPC>9+N_+zCs?SU-)8Xb$RFS`lu(x&CNn>#4xQyx}ODy zu4$xZpZea`OMQEF-#06jun3yv_qS3vO1>E8qllG5Oc4%eNG4Zwbl%P=o*prr?f&DT zT2kd@8&Jc6;s{qM0jD5$+;XU60RF!-hho=U6k=KWjZogD8r`|iV|5~o*t>wD!e@> zI1c^U&)<5kZInA`LiyPR_%n1-`0w9eo?z2(GCf%HbSzkYt5kBXlu9wn%kY+)qLgc0 z4qIR^_wyt9)3_h2Fi=mz%-mVnprTC!S?yI}x33nSj<~rM9Fg~eI*|cEPS1OKd@M5f z@Bj9Q&v3wN(uXbRtGMpkOe{xiEw9YG$Hl<*jg%NC=V1DJ*5(B6?W9w^a?G|@8%$-euLat`G?+29*<-REQJdT=EtMvUn9Gw*Ocs0fH4AlU)| 
z1qq7eq!Lwvf}&(WBnQbE6oo?CiwQNqN>-wqzPeMThwy%kSJH4IRB;M){!e&R7Oi4FN%H zml*A>r=enR*32#3v`dAze@E#hfpfop2wP|dVV5)Q%fkcHds-eI{m&vnc)x*jheE}V zl;*cA7{!pH;uH9j8?E=!eEkSa^c?OnQaknthBX8&*?RE1>_0Er?$yF8aFUkdm% znJmXIo;Y#B5JBFuDd`yAfTN06kl?4=Ur0i%9A7c7t^PnK5P76gQhG!U!E?j%a=!1j zw&H{;ajGR+geKyjion$|XkVR-oB4p4+KBX}a~8}UA$Cz*`*D>LD%N5%7khZ0RM%a- zve_$iAxXFY=he~=r#nvg;UW#Y%!}Ui0dxr8w-E`$6u=lB9M? zhh^rQOA)5Or?_{y-$iMh-h;*YEhj5dk+*S>q4rGPS@>j23+&JQ z+pOK0gM;LLkP5LZ|BfNP-?3J7r%3k7`22mPF^@(5MPb`xuhNtq9FLdJ?A~`5cZ`a7 z46jNeXv-#o!@!Wju9lQj#4h#r;rlQSR?lxQC3DuVtrrb_0U=)+N&VO|m0^6Mg|7@+dckIlq33XHs1~x8) zvKf)Q_z=c6XL|j#ZWNp1J!G2N`rn>LTs2RZWrkaAv0onr5NfslZfk@mX|R0!7!!z@ z)j0p;@Mi}6CjCs7uqxxzd~R0Vb?vmW@~<0sanF`*@BfF1li9t4or^hhcjaXQl(C^h zW#ThX>xAa)Es#f&Ql5RpW`>{S^&SG78eWYWaDnjYB0*FkNTHJn3QcR!m*X zxH_1YFrnhMjW6xXrf5zTH2F3>oT?gviozgtdG?K*VO7`r@YGI|2Ly1zz)q7Nv=(9j zgr7k{3%8TXuYcQaU2&VVTcuhIqi`Y9TZ%g6Q1z3SbxdfRd@TFAyhK=L3dAbA;JG|z z0}#To_~PE78;U^hdQ>c{kWevcO#&Vyl^*dJk^$H==ry(@xF3$czgs_4tmhE255C{d zhZ5TM-_zF_sRhT++Bdu4PbV&Q{wo#$A72pPB$92;7*gG;bK4nBnvh+Tuvr+Uf^q+X z%#@;YD}vkdz7>9k1_%eByG7k8UD%&Gtp9fTd9BVjIw>h>oi6aV7v8})v%#uyhiAzCIb_pxmx%6N(5qLT=N0dLdXTAO&PS^m zj$~a9)mPvi2~llau2?`hdzcgCxiX{q1s4wzx*ggY7Vh?(X(l3Su z!~v|J2w{$T!`M{>7e?;nYSs_acom-wN;pSjoWxa3B7 zCcxC+pt)!lRC!Afh0YJ};bIHhevdI?QKV0SEwsy4Tzc_`U(Fpe358qws(B=ENu&KCKl~aAV zXNKyBJg1u%FJ4@6NN|k@K13G^zj#RRoBUXA?196OyQim{OhBVLr|-eCa&k;iaaqgO zT5Vt-O(RHo=FCZp^(M70s1r0ZV5JAlj1tcu09?(>W(FYK-3JjYpVlLOB*bF={X}+D zD%FuXajXc1((m3dX_OyS<2`XwkGA{(g7K`l;CPE!oN{f}Kv@dB0oW*NrW;Rd% z&;9*@7>SKU=(;I({RbbHObku-u*wYU-#j`X4w$grS5-Awq~jO>PZ{ zZPUj=^sQ%Z@#NM6+Q6Wo!?GO1f$qNbx%2IDC_X4_z_?Z99}uu_@a2f76=$TOLZ~QP zin~-}p=_o^gEZ8xz_(V(=!&Nmqm!2=N94uvrLyyMA3k&rt~WR*@pf{n2<>VeED|F= zQdMjydPON7kv!WpUTqap1_A!jT)KF1m#M}IbsZJS8Pnh-BP+{D zkKu5qACTt1}jPfkus5qpo6JGB=VTk8>O7|*N3L#q%a2aO{_ z)#?AipN1tZo5cClY4}j{^fAxU#bd$ZYfWybFHrnRz%7{?>Jg+N@Nui78wx=*y|L&g zrlxZRz=`xoNEAV)ffJrSLZ$VKWmkIWd=(6?w1fylh*dzyr-~3u3ALA;?064Hos`6g~YA7V3Xn!C(-b$+uqPxT^pzk4fK>d}yUw+J^2|zZ6A3TIoIu*c9 zhhcSGM$TER*VWg@Eza%Q>j@(RYg!oic~!*L8~L@KO%XPgy-uQsRoWnI22kFdd>(Fy z@TV|@XyhUY*Njla2o>}X2bzl@x0c&l5C&`j1d7_-yZn!#!nYU@t`~-|C>DfkgeMk3 z#Y%eYJsrYA^^LCSNoc59$+c1q1uhwpB?m!Jk<;0I(ruKCj5=N2>ri2A66xX)F#H|~ zYiNr5%Z=Iwx1#9qW(^DstR4slwD%(#0y%2xZmjfvfgR{LLnyqzUQQ!u)ELA_pO3tj zX?*sMD&clS0RY)^4bfCaPo8`vSW+8XV(fEFJYo!?71)ocZM!rCy#{L#!#)GE!>I?& z5~D$G0->L^3;E77QAtpQA-t;^qA2*KQxyn)9;jSliHbjc>{lPkGtHje8amWo+}Luy zb?dvEEuJ+?FEo)TDQ+}wkFU%xHr#g9cH1Z2IIJo|6d0wnAo# z=WjGZDU9ReUwffYq<-msD-?AIv&`XzTn3pbE@9gv);JKV z4;JBnIsW49hXDSjtn!E@&3;o(vLC-h|W^lbe>+L_J(qfskq{<$H+WmDt!Mf z3I}7NKymC>PAD6eMe$y^W>!_>lPezC{{$;p>ua@771$%%h>-S_FZK1)g80&nvq#T> zrepc*l2D;vPbAaE0El|lcAX< ziKmaG_j!`(ry-#TO(MFS+)wmg!-7UGyoQ1kYfJ<#!MSvSMEZWQO(|@Op#_>tVxeqV zl(NsjeS)J-NWW?~vSW(f^mLSz_ z#_)!7I!&zj=>o@&+)^SH9t(W^=V6#8LNJqur|a2Jnde`lR6pqRpo6#4D%~Q2q(5T% zB(dsq$ERv{$_wLkbE?xjO>9xz_L@s6R&@;eHE=YU4!ukj8t9uli zfz^7=nXpY2xg3{neR)l9rzKunHtPWw<5!-;Y7QaOIXB8N_SXhQJ!U$`^q<+v3hx*{pDIImLGhKdt$Rt=*+0yz-CssHErM2B%xE*bftNM!#?$F-sxX zK;N80O|A3b^(WaMPXTFKF*B)!?c-+W7bL#T{K zH~tl3NZP?iDTcx-Jx)R4F#Y52Uz5K66V+7v(%0AX)Ht{IML>Ebk+pbNOoK|$h#SIy zNXh<&$S^$xuqX>9i1KDR-f}hwoI4xK$7&7u98eL)|BEIG>WTgEQP2?aTQ*YyZ)y}d z1ZCME*CI7q=Ct_!aGVW2PaNZ#ibK3X6sM}dQ4E>G<16nk6>2UiD~3`ZfAQs%v@fz8 ze9=zSf=0H_0UbDLCBX-KrY~9l#E^yfg|b~NpUi22a>&@Tq)iGg!YkKbCN<7@l+bmZOGzv}BxdQ6?Ww5&0-4cQWAH zMD>_?P}Q)fT&1AcJ!1`14y&irgetu4imJ!v02`iBZ^Lb(hcWB04-e^b@o%yNqz-%j zwIb@lO)9(((qFYfRbZ(p>D|1>zvQ+T^=h=z@y~e%2wTK{x8Aw-hhCD}vo`1X*cWrT zLY)jbbB~_cBCZ~_K{_*sK;x6$29us$XrfbY#aX{5=!tw1Z_LeH|Ec$a<;m?(dkw%B 
zk@*nu7dk6wG^#$%ppek&n&NuP+;B;So{Q`=MoA%?hH!Ps_cqO_xb2ieU{|{jM*IzE zdc9wqFf4*IkTj_Fxc{9LQE;il#Gjb3o4)b~$4(!+4`unBq8q>CsN|Gg^q**XoSm=6 ziDSPvt0V-)@vpBCVT~__vORO9aj6JlmH!Q;KYo9d3ksw5gZ3tfF zFynmw$W~2LWZ6~LobZ+X;-WQdbqtf&O6tv5`-j2%U6dh0NbWPH0&>GJfhvP%`CRW! zynA^z`DIOrk69xx+o{C6!N=j}l=UY8@pP-txn>i_+=A3h?tK~0{bMZ%{(t~qy4t~^SAkeSkvq0&~$Zo2tFta*g+l{xks^Wo{Qlc`UY5<;{&k29^8 zdZ)|3V5uOjxpe%-ts6JK(ITtSg-Z`aL)?LQi?L8;KA=Hjx>(8!HIc8iV&wGfJ8i0@Y$Bybwl=QadxZvYs;dkw8Hq4%>A?< z0|PCMS~221my3tjPI#Y06@zkmbTnG(Ds8yz-EsA#cEPnGVmY#ew+E^g->?JAqU;nd zJUoc_F>!1ymv$4#X>T{Y+c_(doj!N#g%@9T%y#_P#^ZKR*oa@&R)`ZO9$JJCLXkSp>05X1aEn7O zPDSR`ivz~=F8j5Krw?e9am^maef$J9|)pV4=IngQ#=q>O|K! z{LBW!C=0RziBW8qB7Bo6(grXL3^4Y`q*Z)293ge1$rcU<^1JN}tFkj!lvL$T<2EsVwHdQueqabE9cs%Z&kb6zC|rZ+nKD^C@+Xsok&9l$cqSo!n47-)oMo1b&0^!;_C%e z!ds4*>&aH&^gM$%k_VwX{|grCK8lDQ`#tATL=pD%T4Y4RqeOc$xKR5+APDu%*DQND zt2)bdU#_@hp~SlvFY(*3$yq`S zju491ul@ZMPo9dov=dySJjYpilC1BWSDix`WXWWoKn7zZKo_hK`jPmT?8vfloC3rJir?b)*q!jh6z`UtXtge+|rsvw(v z1W@3GLgu2b8?yz-zVMm+19YCs2Fc;0@sf)#@`rRJq)rVs-F>(3XC2r3w4+eXCz>~$ z=h&a1Zn_J$FQ8YDB_!JA0187%8K!#gUXljj?-aY#pt9p*u2L6|F}!$|?1Mr%Y+gUnZM5MMrA6#?V{E9LaMmXJw|?R62d^dRFh96AW(UYC}4)A6^LBhN}%7^ZsDIBAvC` z?j%zA7)=}}{47Z^>wz`(v0r+!cMnKWN4ml^w$u1_g$S)FiIudfe&30YiRbY!RToe> z!&lDWv}~BPHmLpbh;;3^K4)p539p~NpqMB6la|7Td&o8Q_}x4BU&8DR4Od zBbWr1i?MAszxh?`igMrN5~DF6!1@M2kJbmod|#DqmPOUvRP38D4hhQ9pyD%o0lg)M zCvCu~Dg_7;qR?F`|HgMBV6f|uvZwZFJSLn&aYE-j#fkH6W-Fy4hB^=W!$|{5#A@2x zc{@{x&(pmG7i|S^3hy}n7nuP85Ww3EOZi#x%)O8L?|9+3B`;QX?e@dl+UZ_4o1(c- zfbwMW3aoYU8{;#4*~6<}R!aJL!ec$#~ z{S4RFVr-o5>>Jth{uB^lXl3HJaaN5HzB8Fea9Tu~svaV-y)GZe-*yX8>r0y*m`kc%0Ks2EFN5 z?gEO1Wh_NdN+>(O)(3KrHE?z=p#lW(C?eLHRc3k##R7SV0S7Axl0H&Ag1|EQPg}3N z^7Fe5g^@VAArKrC#huyFp!_@v7xdjf)w-t`=4`}N+N%@SlQw+2G`X84r)^@kFwYbJ zkQ^^-HX>%R_5P~gM0qSxJM`lTc*8+Rc1n|w@Sw5VL7?pqY;CdfBffO0x|)#F2l(Gc zZ28dl7&LBkBp~3}@2M$nh)hoCQ(L_vCFKcdB>q@}CDJz7txQbZfs4Y|2-dtYc{mfQ-1` z?gT}ij(EgX*7mpDwlH5WNryI+Z?MSt!0zdO)`M!+ZcLNa#OmU#)u@fh9@y%*;3Edlu{qZ>E?Rve^^uwyTf)Z*ut^zlmAVL~VN}qm>%0!sfv<^DmucrU+rD znV@es(&{21!84YC;QZE0$eTY3v%i({3A$tS*^fyALsSXGmH*nwCD%&Hj5J3C*^7vd z-XrR$igrUdAQb22%W=?zK}gSP%rV>)zWN1K$E9ciADZ}XArmiSW={9(&0W-T^u#gL zjniT+Qv-L{y9rT4sGSop{+J)xk!`yN39Y>ly7pS9y85ije8XfU{Y{(1)Dz}oQq$p{ zakK(w?rC)&=cr>t^VUqT@18=~4};t{P^xm}-gtQ0$8F>DiO{nKON~N8h>>)eCCg%H z<9Q0TGn*XjTG5bFx#0S}yBU(c$G-GcX;V4ehl!mKT8zG=5r}VdwwmSa!3%jE(5Q2q zGLDb3dM!|=k3DV%GS=tF?vAz&coZebk~O}HW{(va>nJ@vRnmFg`4^24%j$1^zabg| z`ES_;xTZOj24Tu^^OKrhLre$x->4vB0P{glIZc+2+X(&j5wK<{*7|I_( zoYdH!VIecQAmc9KCl^G3zck!#f$Dh;qJ#5T0j4^2#F@sDQ9$=rNr zrm**Si?aDjSQppBP>+)>LU9eDi)xTbEm9?>6Ws9@_3?5Eo>~s6O}dBqx>3P3b-k8D zy3P({^4>hkf9i2cTjH9|Vex^p^^~mV)Y&GteS!j&v&;%|E}u%ol`1*5XuRz3kJ{O)(p$Mp9O06A(C)q^5+1br+kfMj&(-s?cw1gcc(I@>(9|l zmBycME|BdcR4T@F822J{!2gYU2Z{rHyIaewNihDb1!IBX98J; z2&=s{!EkQ^8fD6b3Sc>o=#o!Iy2Q=j;6pv5o`N-QEdk0!t_CS76$|RT;3U$F4e`f_ zCrCjq6b22j7AK=`(_JViGHMEo3ep_9j#w(St|c9U?)dNDZv|Hh2XGj((h2{1Q8?H2 zBt`Ai%fe3l71pKgEyzHjq)Y*ngZm@JVW9w9kGYK*UELH?|rc6LCZJy<%P>zeea7yQfAAs1A4j{ z!}qxYTb80CBbD=u11smIvfJc79?Sgc+B+m?r5Ur((c5KzYmPw%+u7T@B?uPsW8>t+ z#Em0VQTO}cgE^(BGkNgTPb@RkMTsd&gO2>i<}?|g;DP|$&H*j>7AOLBAU2NyaNj+2 zjmHxDOfnK_7k4?kyC2M;zPN2K?`{=|I4(X6RwLGl3sgAKTSUaFNk0_c#(%o^_a1v< z;n#F~vZTYEy@i}${2hGQ^!_W=Z8W2G&_vq2`@_D)P<$m}xu=(T0tsPC`jEyGDZ8r2 zU;NDa2K(6y1?d>fUt%f<17fD^rtl3kH3!R4py~G{E?`6CnOw+GNIDtx@g2O3hHatb z*?+7PPCBodRYAyVc;`)}Wm(SFa<+xctv~rip7-^h{dwIbLX?y^(L$|gCiVe_hu1{p z9h3nTk!1*;TAJ|*#EJ;d61pm@|0F5XLfJf|ce z0~nB`d|LO??9y1JqJjeFTavd(p9)CN&Mt~$*9EfjGGZOIg>`ZbWE-UO5OxzuV~Yf{ z)Ip%(@vm3_Mj9HYd-0Ow1uF9GKYnP(g@xrS_``VlXip^+|BPkGun#)S6V$t*bu$a{ 
zrh+-+UZqzbyS}4`l2Qnzf)K=&2cl5)%OwFOfLF_`MgEi{4tzs7wfwWi!?Z6pRdj>v zV#;}F!=cx6Ym4LVLbN0d0!kn*(Lpw^5+;;6ZN^k~i`EcS}=~0(iX@jbJ>vbt_74|GJ-Yujh%8 z(dAH*@Z~nSDJ#e|&Aqz3pIW-NWBJ@Ljgv7=MQH|ySN)+QC%o;t0HBkJvY9w7Uo?Xm*jFONzJ{iyl0W&GnuR10jv^d6DEpf*!lrF`$cg0E zD&o!r0@?s!WPDnSjoX-;RKE zIJX&Td5^6gEAYYCtfRnW$|2OqE}etJZ&$#(3~8kJFHweT4gd>JORtcegvx|-#i-L~&u$i$+frQ2`1tYT zhq(ia0^LKffR$#S1wJgTk&i)>H;rl7iP0e$gI*EHe$(U(c%R?V6I@ zZ)9mH(>;(Q_wG(=a0K`u*+yFJR7dck)9{%Pl4h>>97ssa+T{ZA{`m>1Kj=1=zFOxHA+eF`(S!aGfneR`+9VkjH< zDS{|WQprEJ*QDpo%9L7FjOh|tEOnmcgdlE1%T^`xJtC}40VhF+ z3|5Y^hvb~W_pS8U9a7PZXzL6sOD?6($aNh9K~-5Hyn8R=X0#Vv@p`{8MJxqVfoB4uIrHnc76Q~S1;H7YE@CcCFc(7*hI8}CzHZrGJ+jqA5K~A5ZI?U`vA0*bnHl(hu?i+Mg zDLJ&DKX>jCiGFwhE9pad9)Qv}N|zvj;59U?IK48<4`tbHE&}`~Rk~BpwQIQSc{ij_ zCa>t5?GD!V=Hu&0XBs(duid4R0zIjMN;NOkEQDz9BX+GKU{$q~xSKN6t4G!J4g+;I zMR-%kJ-3q@$_tsKvU&%8a4NbToHdG&00!vashjoSwp?%y24Dwj zrF^J;rSxc(rku=_(l||r+a0^fzsl7hoH=s%Ruj4-H{Kq4BUgJl)bCCq+7PAjRyS6JR6BElW{GHi{^)Lr4rahN7q zHpLgsPtNRJo$rh>6`*@}hyCloE9u(j2iws+@y>GwoeMa7v)alpf_e?5!D=z5d=0#) z1)KXo$$~F)`lY;~fr0oM2{kAWci)_i}EHxb`1w3l3YPnZ1bv_^31`9uhQp-1hb zUheGfN*p1LYr4J@Nsl9#c?)mkl}$U}-!-Mq{OQ~-1*&u@k^sC>%EyJ&sHWMH3urXQ zdJleDCgchlU`L(kR2a4ImSXP;s56+@)${XEXB9uZGa%cLLssH&Qaq%~f|}Y@a}JVG zrbg7s#ZRjj8XLKEll$rPiWGpc?=i04Ph%lurww?$=di?qUb_pPVU!mqG&OL1!J?1q z8{aTlzGN|Y<``b?s$%(p{7a4?#fSX2j>GmkCNQGjl7>WY`=Z}EKrTb>|v%(yhERfz7W+wUiQ0RCc0>}6U6*64d|&Q z;jXE|nmmk(v0<4VG%XGe!YZX-9jdaO4|ot@YN(dUnNfW>pJ^R8AV}dNgrL|{?4fK> z3*oDp*bi)qdSlt7vmMmG^>psnN-!((xH^;e(+gEisl zw7}|p+~J?qP>n{mL#v15@0!U9O)ab5&~=MN_RgmuU$acfYOj+8exgY zb<)qCb&la3NU#fTd79sB%`fy_e8kE=Vol)}sJ2U5P2^eI`0a>|awHwhR!=UhI%%o# z>9J*c1YH}Iu2uQH9)yF^Fe^RqMKtN6|7n-wilKrKtBv+1{%Ph=()=XPEpP~Yq{mQ; zlSB6IDPR*zfExeEbp=8mPEF!dKuziv)t!1Tq32f0{#vaDpe(+ zN_CAynTo(yR&z*`^VUi@cHk0Gup8EovWK)H2(Z$BUsh-#KM zR-ptT3F=VF&54;w!(;RWEu1&JnlgI-wFxs4$mH${61*&5J-NnH6~KMZm!T>z>apzf|3s!7tBq~ z%=R{KBz_TB>h^krnT}wf_g>xHnGCk}$8bS4o8OKS`D=S|xIGmOq1gT|omj_jxz_?4 zWpkf7vZ+0l&0Q^-T^Kx5sgonKlC6YI}H zsj183pNsU@RgwPetmtJvkmfZUDCXBsmn;a@9ywEfGV{ zqm!QEiqoLt!a*P4xVTbi4%#OVux4cBqg?A-2h8k>U5pfskS_0T_;C_XRcCN$NJzHM zTE5OY!Rv5W1JdYuK4qA@Yrtkq!nNF(W3#(>d($F8ms5Epmdcx)owp8S zdlZ>E$M0wHKk?tL{k)7`Z?!Kc!fr#2dRz=!vz|;{3S2uU)pSsYlgg}HK*G_xdc|N3tsCk9l%-;@omoPWv>|LB5}HK{6TYgM$MD zyPLOwDXTg5AksKGl1%l&rwij$7T@YRfBxL4$Mi?DHJevfR>nY@=4Z^ikW^Eh=+pp` zS2u1)r*g>GExfw%s~^L;SiLp|Qa#AgDz@5GXhfm(*XW+Ho~-Ft=oSza6C;5soGjD(%6>;YE-nry zsniXWTWCWSYCXN*Degyf=Np)62oW9~9SuHv@>P82oBNzpTT}t{!GTXtp-}9+AQ)%q zR**MCsbOq9Gq$m{RU}iFjhW4=duJLgNkg%UtloD^}tL4{JiAPkP0Vn~kv$Ebi(Uz8WCV(6GZhEZO)EeNx-boVD73I%7F z2|YMEBMr`wT3T4pD#Cf)mM`;2RPB#Tox3_crUi9;i_sG_@>i6jQ+WY*8ut9S53j)% zPG(5AuesR&vUw@Dkx8qkv-7S(_l6$pL@Vonpi*~1YU=OzH>>qvN%&)yY8jroDJccY zzC@vDufYr{t6u`Kz{kRb&(NHFRULp43ewN_z1cnt<wBt<9{#XGWyU0SB+9H4Iu)F*5`w1!#(7tR(XwSrRcPKS5EF~+1kkLRgtO6IZW%L z;;;fM*Q*M2*$6B5SrjTsynRESM_L(XA5`Bhcp+qNAR&C(AS+`5nA@xd4J=N(k3khp z`<Gqe&k=NvUyoj(5SF2)=FV;6ri@?lUo$9(f}lE4BXPF? 
zb`sMsQqyWG^I2+`Hs#)&&_q@MiRJC?4WUb_XcUTpW%yWAZY?4b`fM;`)=HsIuFKfh zey&S$!9#bWCw}b| z=L>J2D!8{N9m-akYEd4yk7GumE`56EfY+0*$_WVueX=r^y@jY#i1;R46B6Quo~g6s zf;evUVSy^=6CJv@)?6xLO9frPe)HZ5eKOyzB31cWl~emlEa}*8o0FB9g@t%pW~O@> zd+C`Ra*wl|J8u7-|tcsef1}<#Ck!$(F(u9Afvbfs|G)0kL z-`@VFgnY6@y^KLVrFl*WA;)-YpRvM6z9?lIK8AeSDjr=JH5F<@iTuCm!Y_8@^M767 zHH<_yR3`kEF)Y}`3V1h z)95*93jZhyA7%I{HI+TPZ2prlRM^VrAtWk*;=Cmb+2tEXs5{QkD1G&CyIKmo!+3{_ zZzzAU8~z@3TM4vwpQL{jg@>9n0D$jVw~~;N4$-OE?DFNVC_Fw>=nXCv-0pR_{wV6J z@WuZ;fqt)@*IR|i-LV5Hx37LahH7ZjST5&9qoMM1*s}|KC2Lm&H{s6eSdaGu0+f0# z@sHks&BjcJuu)K0p6jhYwh1Rr?rm3aEL=h9|8SqK+Bax;a}EaMm%>Y+pS1toWBYxm zHj3sB|LG4Uo{I50bbIh)y>EgfG)&1KpYbDvWL=xa|gfwX61d3HHS!)fWYUxJ6AmFsNc&Pe%65U;kYX$5ZBj!Uz4X`xTU zUi?oZ1L{$y*7fPxIq!-*O;L6KIZ{%7r{2#cPrts{*(diBMx!OGZH+7j%dSm+N1@i_ ztqDaR>pk= ziBcE4&u**rC3A^>XoE3A+?bS?CwIJ<(9>&zWNzgc%mU5j*T~sz@mGy&EWh(+V*@OQ zCWe-lh=b;kTSrGmIuE@$N8Err3{zn$!S2ZhOyuRa(S1p+4J@DC=q9er*4Qm_l3bN> z_niKY!QiER#~KV3m3U1ylvqzT5cH9jXQx~km2g`A&%oSchMTdpt*epNZXfsQ+X$!RT|;(5 zByp%uoiZy~K^xfQilUIJ-i5ghd)KnB#Xa3$cm7QQ5Pz!ca;mxt)!iizr&FTH%xYR< z(r%7ad3p@Pk3efYqmZ6M&zI;yTSE~?nYygZ2aG?T$Szio~XkT%&@IQEt82xF%Vh zhAA>VXPn3et-b6z1>77Det&!QY}ucqVZ+zAzjK69rDEfr`(%hbS;b@W-Y4!;L1Gh# z1bw$_6Zx_Pvt17>s8NI2oo{)h;9`}{T*|XKu~CX{dOEVb4X2(|rJF%+vmCV`9)VK0 z)nH^H7`oo9{m^>D%cmQn1#^?j#kMv!8B3NL=>-`!9){zdb>M`D2S=IYd-|9+gok(Q zD2b#0tVQoF?CsT(H^SYjr@3E-SP6yS61PgwE4`9!ZtBl9(=Ig|ww*KV$R>KX6eEhOl6P4 zn3UYQW=@*eyLe@lJ{1+>$#m{^`&OlN1FZGIbi!-g$YBm{!eb>Mf98iv3sow7_)EF8 zyP?s(2gh7k3`Mqhi!8-Dj*YqZ+$XNgRkk+4Y$v!`X;+&-*3oD8y*Zzvg|WU?4V|+A zDpFW!9hf46)w#4rBwSE&8k;wuU&#bZfDsu+pO=+Nx?X&qQ)}iePiY z%d7&XSaQ9VS+09=V6}(bef+YqM686dvb)PbUWxniFrrz==x}Y=gfq;VYF;`CP&6BZ zdAK?iMm!?ewM3-W+_+R$A3*+TZc)T;BL8-zj7*)1pcTUd37&7J!7oY8=CBgXQX2}VW^=YG)34axj z`OB@hN<(z>O}QI&_Yc-7C{|-gXvjnvXF<^XF+MSG4wsOJx?UX9(@!g;=QwbETk5BD z!m%hJhTCo8b{#`2BL${4{H$onf@*%zpCG(Dt*=P@KF=*uD%1Yi4+b9tt~qeww(o(9 zS4GP7<;iVmUtq4$=RNep%;F7)bE;BvFvQt=C0|?FnIx`khrtyKXkmv)dj+-@sy)7_Op)sdjH}+ncJha- z1&KWE7}^Ui#L@bew>$7w(Nge_JAC@SIi~CRqi@MtnFp(f<63r~@OQRd{|yUNVoEr9 zz>05jvm{XNpP^d%x$-bddF*{LKbO@hjzh7BR%|?A{YTGO?d<>T#?niSNWx(0AKdnf zo!6_(aj!2oV=Bib4a&PF9vm)_^@VSYd9I`?txi+?6uNhlQ3YoS=G9Av#&SjK_f3t* zjCXeVar^7-!SV@SlA^^HBD3FqMYYG#)5O`{inW{YClK^z9a*+%RLu(3N(ub!?6+Rd zA4gZH~r#3WaiS+=rOR~h##G?x^Z@)db!!b?~Wd=NqzZ(rsf3VCUggh zu#XBFHhdYYBfA!9cy$ZxUIVgbq}=B-e6mM+s_Bem7)pd)cV(yK%NS8p{-dueI=b;r zC4hDAZxM5ntNW^U`GLgxkLPDsJnepcd*^hpdUpSq4R_=3CeniWc^l;Y!;-@wKd=a? 
z+p7*;4_a>XGCmYf1SjLjW605QnbwN_6=Km?CxB>-Yu#FmKg_W^9o-=ps%P#HWtX0~ zzgQkDK}|udx1Qo&9t>R-BeE7c*dig6hGoM6w>?C3m5YF;g0``J4ajPbmVoMYP6Vn?{jn6h#WEzL~ktp zefRBBb00CZdrpSwGXuZCgWAL_OGP7d7-Le|V>0?z>(Wa~=p4?@u_U~VQGJD&XGB4x z(Gya-w$0azuPu3n(JfMzWa;RvYLH6`Ct-u8`$3 z{a%q>Hi5r)iB+@M4Xm$4ZwGf)fGgvxq7o(LD%!+>c&f;Vr+&9STljQ;HAHL~VvmkQ zNpfz*`eD`HxDT<1Ycc3&kk32rzq8B9G+(WE2=yMam*YsY57b6FgCxR>Ti;Ri6&A?M zL|$GkJN7Ifb5^fvrK-dOyI10Rcp&W77VA?~?%A0H^YKH+ss0nxo>#SV(V?K| zSxzGNxTD3+Pa zk7(!F;k*lyc6~a;SS!R+mV`jIw_FQ6gI-ym!8GHvim+=Q+x68m+r$@FR5~f0CT1lr4JOYfobtI}=au zl&$C0@!HSNYJZ#g(B@xWkHuK!{)Yvz%dpsCDAP4D59_LRp12kmGPgMd&@_VIQ{sV> zBy-q2*d1Qt+U zU>WSN5PIucst=#_Sp+tvA$&~q`=CMyRvQ8 z3A6Q@BjJskkfrFC_LeqL^SaEnpbc1om)URsmlc@q3AhUo4|f7G=o)dGpn=r`*+sZwnPw%Tv z5v8X2_!}i{8DUo@DOQb5qYq6Rrqvny{&$E|{bv$X6Dv&|F{(*Qf>HYLY}80e;x)j6 z02;uyq#uD#>M3jD`Lv2%bQCSDifBLv;2ij;Z(`(;E~*mNJn?FI0d45E(y7=fmbhvQpP)uYLrpEr z;RIj$+Duy2DR2L5@nHQ4N~d`+s~7MB}g+K>X3dsPkmtdS3RV>Q&=5_eBn=-R51>=%+d- zQPRuYI%W1F9@`^Mp1lsRq7WwQPXoPmY+q+*h{HMS&f#Hghv!@h%V5lZ9w-ga*Q9Neg7Zz6$1S~qCu2j&r@!nR!=ORF~q?=zTcX`3YBr@q81x+y5#tBL4WVPo>}N?*odthkdU}yLmC36<3#* zB+q9^naS(52c1*QeSAAnb=|^xyNx61XXU5lWJB5BO?*^OM)d#v5a$;(QAk>IR zTAnbdy}NR&yGreLk6)46>Niw*s>A!19H!US z^y|0Vk`K~NcP97d>_!ErmsaBMT{U`|oBGW0r$Aef!Q`Uh{^&5Bhjo=`e(^$&Q>0@x z6#I?i#Y$bK(H6Lnk>KfOx^1p!WoibuqfNEZ*(wklN(S_T0oR;kVZvdNNH&lq(i#vHfb1?7LabF8w^rPa%hyU z0qGb9_8PpO=Y5{%eZOPx@7w#I%`t}#%r)0_=32k?TkBjaMNx!5hMOfIxH#El(N3Ai zX`p>PpRF5RIx|?8&t|%_;yv|kWsYVy;sR;y9GSVn4jEc({YQC`*qNj%82OfWr!R2q z1*=Sq*T-_t9i*-WwUIBH;Cg&_a$uT~m zuySaz!`&&JraV_f(&9AJ;BwS!UoaM5HfL<{)NA!VlV9rVjZ}Q^KAaZbrw}Lf-763) z7BBXzzPD@thq6c6;ZjY0@Q!z_+vhL!NDGT2LkvBkx5Gj9*>%Z^a}qYJ74tnFizbP+ zW+of9N^E`n)N9+wF=Z~yd(Ea27kxpet|f|zo&9VMT+K|f+@T!yE1`GkdOyq~dngQ+ z#pOFXr26Zx!%6EMt=d+VvV^zWiGq~{mZFkv1~~ZI2S=`s2pw*7km))uoJEV(tOuNr zIxmlC1d&CDqywap8%W^mSEB2=Rx{7$)NXcYJoYw%Omq5qWtCB95J~>Q>|1L~TinKJ zvT}XVyt(`1@T2m06_{1-1~>$$ zQmlwOuU%?pbdvTin|-!fb0ArTDYO+s@3N33xE-y}oKQqW|Hv-gA|+XT}}DEz*OzCg;R9oCml zNbRx^)x-+&DpUOD=$ExsoNb0pJ$5C5Yvdf_;Z;WMkJs01O`n-)ecja5Ur4}>>nK8X z2p&4REX$xN@0q}6#?rM6}v>j!&vulrbcgF2EE0#ZoHTY{D zuBJZy{AFyaNE~4}s?cCqY!FFc>dC{Pum~;I@`-p z;o5rAQe)z1h1CK)&z6uDiBNjEnt$eJywiZzNZnSRZO65Zh^#P@cH`QmdtMLyC>g?>y zesoooE*S((s6xy}z61Fz?^6v`Opxl6#nwO*K7-vA@fFuiKMoW1vtexDFwG=~!{Ei| zyLSvi#a2mri}Ih&J~-N&zrA&+2igpg5|zI2@hHc+-=6Yi&v?DC$~!4{A6<^MmuS9uL`pjn{yaq~w+J$abs4_dTb{I4V6l7O@5G zRn!Py3%j4ma;q_wKX$Nm$&5?dbaWw=zX+Em9$`zX>7FQqD}O3Qq)VO-@6O&k^dX=n z0bv)_-<9Aag*!K=-r<2;mG zitOw2VT51;L{W(Vo45P0aQX2*soB-ZxF{jqnqwnk#4d*UhYwzP3*OB+w@#bC<@?C>HLaPdG)*Ps}{+XZp zsPvYN)Q2*z^!UL86Ik_EVN?)dFF`o5z@n*->Ekoe`<8@ya=))0^F>6X9EvmE~iLY3B){3j846=cwknT;yeWg$+B|n?jyrGrQ2d zSu?se{-&b(L>hr0gpkmxB2{#?#Vz$8r;ZqStaY#X4NOg};}(KTxsHN{cMnD=rV{!K znyhomL$f=pre?36S3(~yW}YEHv}E1;u&uddTI==)YpkbrV%-umW zZup$Cfo7#yvh(ah_{x~qFdY5N-iei#cz{>9?fH7u>{;*Kw9zu<2E!sCHoZ4n26j_c zZ9>$=SH+%M@3!9wWmxsO7yTe!BQ-yNWvrA( zaYB8;&gZDdXfkywqanw3Q_XR7I3@!Fr(~iME-{`1v=oE9P}-rYq%UHfI+Eyl@D}*4 zrFR8{!2gNvYD-GS?`?M*9C&|2k}1hqE^14O{)vzXVQ736$#jc@+^&m1tTBUM(z2JU zyEx;@M9t1S*~?ULGfy?`OzZslB#HiiH|iqf6fkdRGqZB8FKpL)Z**PuNQ6#>bUjcr zu2NOeF^ITA!TSx2MDF1PYub9dy)}FXL#c<{lMHgAW2<8CS=U0di~c>bL|zKn;g7^A zYiV*>0R$j=IXP04e-dG+t6pJ>U7jhPtcclKn`Ehe9q-4Z0@o?aSSaQ0ev;_F-G#UG zXnD-D9Ca0Nn_LiW6}!zhDem#OA!=KMHY<_(rHpB)J1xSOGRwHDY`5=~>&_lFJ!N7a z7sbLo`@v!{L;9_3b8zbqiGCuR6Jaqb=3U)JUbC~P4O8&wki<#(TslZS%#^`yQ;Ytd z7L@3SlV%6xV_VJlyqVzo)b>G>h?wTQ%ZF7SYYrkKez>I47eK`}^M|HKk zT)OK_#EWMnzU_VTg4^#>ZLIb?VWJvdvy0Xxt0thTQj$%+73!RSdGA5fNkq$Y*-b0W zcs@Su&R8N*581WEx~;(T>ii#5ZM6I3W3~cd%+KPN%EAb4cr8sdC6Qe$Y$-pMWXlOT 
zx=zTUVf9VbysL8_K3(QjVBw|)D@QHaw0heMJFruUIm@A*oO%#jetL==7m) zb|3bYRm^1xusj<4B-oJQnCg-x4F1g}zT;P2nuksIn` z<&j2*9%9v89J>B8cKF-n^N5EtQX|Fh?d**phA5toUL&5*s0Ew9JLLk?SHvMEn(Xyp zd1(lhs@Qdg)hAgNjTkI<=zT-7zXQ$TBreO{+EXu~SN7u9(Ho2a8j=w5`&FSyRRKWXhBS zjhBa{Q&7ZOcN3AmS?f;Z654yZ(`}sBPyZ7_>i+*K5~G$QVaH z^;+XN88ja+#5R|i8rhD1O-36e9d?jW53AqaG=EFri%jb)uJ+mqB%CqvOBmx2tH~)T z&!ETalgAJU69c8~=bj@wczUI(aA!FL`4^YAy%cvUL+eDvT%xg|&au4-0tqqRO{0~B zAr^lkeB&WWL)Y`xGE?c%;^6Yyk%X8shdD)23VGsASQpHkG+^ekW()ViBd#8FY26Y=)M-9I-B`T(<05ilbvvbTffuouYW{ zPG}0f%u+F5_43fiE?jqNZfB!hD0^4&(I;u)V-gP#9BKJKlwGaHhXCaR|u#hs~Q>#}(;*=-K?yNfAa;o`M@{=;0{ew;W~*>=F` zq6xZ*W|^t)N`36H!k4|!cr;YH@YM0l7Co+f&lrCnUVS`J_%p0n_ zyl!&-{FiV!`3olud!RL5KRf0$y>bp;Y?zwr$yI{A<5^kYflWLfPQH0B2W;Z1_sVAn z@^R;%@wt{vJF^=(`u?@mcqs{C-oV{Fyb95?KBF6p`x(^&1#-ptX%j1R)bmSjQ$y0i z!ooD2Ripqbe+?Ri<_&M`H-(IKi9X`%?Zsc~p|4e6SX+y0#xD|a7$5Z%eoW7FfnSu zkdnEc8o731gr?eBbD*?l`Y|6Lmi(Zzhi7 zRGKI8_CrzTdZ5qAhJ~ zLSW$r)MkL0+otdg+dKxg7W-C^)X+Bly<#lCuyr9?B&0cd7{hVx6hC!+3HZ|%t z12flsC*fZF;d+3Qv%1yy$G0(<05yb2Gi?@m=JleXiPJF)6 zqhedLj#Jt$EzlEo%H27Q;0!M*@G_QFb#Ry%5qsuA{IXE|wMAQ~SZVEB=c8}+aFF6l z^y8T1VKbWq!V#5pxnqU{)_6j%qeGZq4sp_{a zos7yzfcjpZXDP^?Sf5f7AK6%+G8&+a++K#R?WLg4n~yb8g^F;mby6Z;6&#achLrLp zz78S^4yFtXlFI|1A#=3q@8yj-UT_P=s+^@^-u4W;Bwmf+{nwEij3c` zY_YPN91W&~ict~ulCoB9Vyg|bt6fccZ-pu@rUzTJixyZ1p*6kCUHTY&ik!Dj`sl^T z5`c3DyINl_Ku21pySXCny0Fz=xB0;#?|hDCgLtUf;xmv1~A6D80`rxzDa&y*s*k+^M;Y&&NwA%hqud{$kbp+pvRED~-P=m&pxWYd zI(mBg^FLw%q`j&?x-=_#>(_=2C3C$VSj2_8tSx zC)5OfGD@6TCw-aOV{$B@@?_3v!-!zV7wd@6>#0cvZBHv zAy2+WG9VKcM&R`NhXPY|>~N`gOO*{Qmeg9PT_<_qEP1fK2ln#=1+S^(*s3O%cd}`{ zDdir{D&k%q23^|=FDLstkT>|37lyXYxD+&VwIC*63w?PS0yk;t-Ok;~;TmKucQ%;v zc(|c+`%1F*5@gQFg;e)0N|K|hhoW8-oZ1?2cHm)YL9#yP^Q_^z1_$DkahmN(DLFV6 zQD|}L&IY*^_!~Ig@JC1_jt}nuLA)*j2WEaB!+{UO$o`Foi(W3$HR zTzL2rkS#ACU$7;d?Kp*a)3SJf_G@%an(M)Hn{6__%o6u*ck1F^-^wh+(iUMD?8|CG_yZr%IwEMDeXlI1 zU-~;7pMbN1R{@R~U4dg0y>Kvveq8^!h2#I->vZv}%-_!>xpy2`EOD7?;fm97D@dS; zv>C4`ezzhz6PhUKT!BBBgFt+egeJCBem?QL!6fftKb^S4`*#zsUnyos_Zy-sIw0sf^r_xS2|jQ+$kSqv+wtiS-1IGRLGdN>Gbzm{ zU0HrIema%1a2jzm_yu^UkS3Ep9S6jkt4OS+WU=JL;B%Yv)N9^$Yp96<)t!pVM+a!{QQ^S z@*n2}{xewpA7>@fp1{ckh@OWajDbe{R}mMpzy`?2pWA?=3%bPkKYe-(3*-N>M+#UY z_@RyZSGw)+DgHbZAbpkp9FNoZKXaD+rn=zm|N4N!|EdfB?YPH3#^8VU6(Q0P-Ss2_DnQl)G}JuwNB}huuEB*5V)qoB;at=akmj+kutwy4G;(!IR?C& z4gZsxW}dSur~%o*o?IgI5aUQW-XHKmgo>eUgoj*LsLsj~5NRH8J(I&(6xC_OV!1%HPkESPvVGe{$em4@pUp{e68e ztX!(c?5oF6QpE2e6>`xdR+8(-yB>k~lLQr~+Vbg>FC@w5C_uBsb)@Uq)~ajds6+a* zyI4p$H&0f-`!Z&J(^UkLz9~JPenUo&7qqYw)HoHsex-i;6#4aQg4)^V(`#6q$rA@S z6UNNOcBwM4@kuzmI!ttM6 z6B@EA`-l-sR053+N$2>Ez_ZXYxA78Vw0sK*2SBs)^7zX82L`gs%LV3l{V;GD`L%1; zdhNjaFbZg$v5yh2*3DPOg6))@miEB8dTg`+j*v|X%AwFp5I3RveU(`YccSao+co|2 zm6esGLv?j^3tQVzSL)vl^KXicQ~i`vP{)j{+8WTAT8WE_G9|qxqy-f#TE0j&m6@eT zdWj{<%F6y#Ciq?vUT(GePEJm(Kc58&Z9~=>{7IsZFGJ|4%9E4tZEbBmHahR3cqXci zkC_n(JykO@O3BX2DTO*1${3hH7?;P`mw}eKIJ)brP8YU>IR{KjhpZkaa{Qw`Egeir z)zvyE<5F!BXLikOd=_taKi zW@Z?J^-$4+%dhXliB-OlTKxiMX6ETF_fnVD@i)zU+e@pfnm#`DBGjSq?RdoupzgnH z=VfJuk6`$xwCX%7hX%&S*&l2gigR2^1nhB>?_+FEDYtP zbe*g96f+$9T6zk%0~v_W9Y3FCR#gez#(#0hiXX;E>e;bs@I80m-r>9%OnGWj_WV@$ z$cSag`beEuH6*q(A*F!wJ>8IV2nNv^N$>yhlliP4jF!5-K6PJtZmxV!i_E82ug<`l zip9X9Ho?|;VZSt)mXD|#7|=M@?d|PZSX;mAFi|x!G8&8q7|>z93k8d2=O5Nj6hHzp zXbP;`5ZNm_c^Z(;Pyt#EhrRAL3lP7&)ok1VBgukv-(SdhKHN0S5ihA{vny zo4X^;&;Vn7frY>cs^6#U0DIhkXHp5;OR8*|uSj`RC`bDFw?AwW>T%p&9(p`7+m*<% z5a{q`i_5?R!-q5yo?BeBK&srm>!-^egE|(?41?o*RIV`W}R?L`s7eI z6RA_;bL8z-JDzk;1jZ4l_wK(=U%1D35n4VhC_k|DTQK|?_{q~G@>s%p9|~|oqR4QD zmwNY!TAvm^!cQ}Pv0CG0+8s(}cJ|0I8yfX%X1TcE4dm9A5Q{KhQ2jI$Wx$SH-}AKF 
z*}~$ov$K;=Y|ouwiIm2Yv^*)ytf?8z-5(zqFi{kak{Up>vano;`9UcsetZ+(EZ^zb zE1WJXy2u_Qh;4S6fF0V3ab9{(D8^Y1d3j|5#EARM1hx}z|2+Ro#we6&z#-&;zJ3C% z$aI+hOxR`Aq0J)Rso&;|Fl4KOsf$&=)k;Gk+&C;EA_c}7R(Pduel+&d#f#DX^(R@P zy5atKBSl}Pn32SY;%Al{;QRZ4|F{sBoxKQLq!6>%@$ZvE7d3AZ0A zdMej~0a5C{`}6h9Mw*Di`maCVL{o0=d4doHu@_O9S5l&e>g(<%Doft)G{d=f2*0tD}OPoV>(!?FppSw4zPEAI=R$R!=yPA- zj%?Z`GO&M@6%`d*qVapb!kinYMZ^5`9l*~INs3<~@OAcPVW?Rk6 zy;um7JzFt(-5iyeSO9gWDx?KK_<>x{hW6o0uUXWa%|!$)kuHfL(j(#YuB3uhLN0tJ z9YPu2yPyV9P3^}rLS8f%kIYa{V2eOmuA(6;yIZ$|@45cm^%R4A8mF$_4A2O>{c%vi z*0#XH(z3%EbgoG_{}f<{4+CN$TNS{DaWzhZ9!tD`^=Ju$QJ0eoTz0OmGyt&BM<=ge z;G#y5VZ0Tn@zAUWomX8goaqBw1a=I40v>X5Q_us*lu)VJU(7+J0^Z_Vse>Kx82!q| z-27bS<-$U?;oHiIU8bbgJa_&Vc)?kVHL@a9M_HN3E?u(1fx7|_3>Q)blq@*~#n*f$ zJslm2l7seE+?Pha0q^jjovN4nRM(b@hpoyoE&U;AkCx}Dx$7W(*5O7y9yTZA;X+gG zxE<~PztMvHO@h64)x-6u`c$EKbVLQ8NdJ?YevIVCLy|*J+rP z1z2nmzL#YGe^>W_;8e50V!AkxU}DB`6o z9Te}2JJN@Qn1kCgs)@FBd$VC9knPy9naV{nh-q|w0JIqr1z)Dq2rkIYrF8NH=#PAi zK3@u14pvub-u+2wS^o@$Zqv6&Pdi#L&cOP)=)5FxuWDk(5hPktHWp~MO4OHD@q zCs=}$@7O3p5@ikVJ-T+ed@06ACI}BIy z7S)+Rt+SR>bC34%>YOBa8b}tI0Pw2GqVV}sDzGbHso9k4(0KmxtIPv5|4(&(PSaPK z+(W)F!GRUh!IxMKaYHS;y8w?tl+Uig8SsS{JApftD#ti0Y`jQg!Xejnt9Hx@Z-mA# z8iGMN+tS*q?BrBhqhFxQuGx+|bLLF@;)LJ3wz-bDTTJ^h4^zeleUiaule7*LU^F^< zcC~@Na0CPD{F2Va+v4H|U|OL5LRaD^ml5j(LO^2wT)U|W6HCq`70jFu{DGkz(t8*e z4}rn@4 zyGGpGE_W!2BA$aSR#{mI%8k1m#Ir0)NJ!|*Hjk9tFlO6s=O3UY^@Fb`V{cD5(FvAD z#^Ns5kk%^W=sy4zLh#Ds@^a9f2mIf`Fa<&kaK;sW9F&(|fBW{0PR!fz2`88k5XKC}8}9w1JvrAvkC$1Gq=St_b?2D`q{+Bj5DbtmR=_p^vfHPGjIwZWpeGvxoql{G z;8_D3w8VZ!mQAlp4Qwu?Xw)-6Qy|^8rX!SM?Euo70W5*)hf_k%=H^+DCMzk*z*`Hd zNpg|AfV*m!n1usmNQV6KVO;nDfyBssfxJkAgF21eM?~v8qM+u$f_OEbTpN000rb0y zKkQE9Brt9($87dI>)s%*8eI}MI#M&oTaq7JgB^uwUYM3@cUYQTNxR-3XmR}P>E|ag@TPV5Xxh!Yinl%#JX8(P_lt+atd_9lM}{_1EZ&W zFB&t1;?qppiKmFZqfZZdky^-TaC^Z~46Xq3`My2L9psPXT|AFtZq8K>i!Q6SRy4gX z+}otvi$Mph2S7z2NlEPL0cEwA6hBZn^U7m9WX_xG+LCo=qPY07n7WRJ%i z71``(kUYC{8H=*Gz8|SVTd|q(;1}~Fhb4ZH@^b$&H;KFFJX(%%b z%gmPh{d|E6Ab*fO>5w0w31Mhq2`EP^w{@`0X=yswAf4!>RDFbxi8ZJE zsgyfiAMU@tS4J_Ca`UPY*8n!Hyj=6QopB@Z!ztOmJwSMZlz4W*$6BFEKW0LB?#73n;Ey^Gv8$fmNXJ7=T*h@|rT5u72!y{qbXP3Trr$_^%#CU9M zJ@(!sX_2uaFL>N!iG~k4I9%{lkJRIkEhpQWRih?{gjbR^G$37LKwtbR1rnU!YVc23 zg4%f|_E0dF{y{iFA(xcEE9`eOD;nFYrsm5#fm}E3x|1I$)6gr=^^T6RBHe#|KKry( zTlyx{KgjW3k4Tj1$?TQ_$ll-I4}t+CCU3b57)a{Fnt9q+w{*j_c6-jR&0{cp20Bn} zIsa)}WK6+*Fg>)(tYY8~?B)!G=#deMKD3OL%|xB({1!V%inO$}+5JYpg6b)^F}X&! zGm|*Tr1Rz#zS$M0UasB!%lx6t*-Mu$0m@liS-CuHBmXlWL~zSg(3Urs*js()p{HD; zigcZ(fX4wzgx;y*?z!Cq^W17u48^zGbw@%L<`GYz%{R>%eqOWLFK>LpMS^NN^gF7X z3K9OYjl3~;La#`Wuh($x+))y9SEvKZy6QH2u zY0->ercn?VZuh87S$6^_Rt3=VflglK#tBM*;oaTcp={C(C`l;3>(A9#WH{2DEEOUsx=cuofX#5O6(>EO7QGu%I`q$wbM}+g2>Iz&*#734*9?& za)CKxc!1+>Far`}7?KY^oG2tcxZbs-UjvH;q(%3sS)MF#$XsWIIRW7ZjGxInJEQaE zV>U{=ybZ23>)%*Vb+@lv36y-Vl(|?9;|Yn9{Y<(N^1w)fRP9nCO;8-`38TPj%|>T* z7Xd=$22p_r+&O~C(uR(H(5mbl=4@)jl z&avvxp|+Xh?ytvqg6Zq4OR#dZ+Ev?5vYc1VBl~0=nTZw!!Pfd_nvwh7F-4PC;>K+x zo}*+73G9X! 
zx6UGr6QD{D@}E!J0bA^9=>$~V?y&ZE9ScOec7orKmYP8zoG6G$2h3_^X`YL~)>taC zX%#Un6cR-Et?3G++cc{-+`8VR^W98j&WKf-$cDEYkV~1>5OJ9gaKI0p90MDZ0YU=d zgBdKOpS=@C{$Ds}|LEmofKXslg7!a8ekn=@-aa0I!sNFw+HWER%UZKPetf7hy~7G* zO?GbX#bNPZ69BlL4Ng&{@RD)pTiH;o{$8ShNhj~xlU)CNYz%0j)E+)e0`;J5n+j%4 z|M+-5Ff4K?5NNk@qXX7He7W}i(>%L;Nkrgw19YI<-NOUKwK|k#ne})EJz=<4ZZ-5I ze}pM`Gz!|JUGMGn{mRBCH0smBykCEJ6q^oX^;kY|CehMi^n4&H!Sn=tNLpfJ!GK3I z06|b?2NKrSdGL}}s?(8=e-B&}tDIPi4i zU$+2hlWgxw0DA^ncJC+OJ2~!aG$CZ}Vga200nfe|Vp}wCE@l zVXrG6^hi4Y%wfH)2tJhj3u8MlM)$Ea?8AciBd(jd(z}?B01_0>{UV%^NHOQD(bRK z52)#wn3?ein7!i12|dVx@U62m!MDY?ojAHHJ{??H$WoXT2Bq^;Cz|l|9&kcn2KGQT z#lOTjHPqD1#%tWP>pb^f6!2Z!oz+j46;}DW`%Ea3PR!r<$_aAYyD4k&p(7Z8*~cQ9 zaR1+s8ReMU+JC)Xk)NyT!N?7@PYt;+MGA5}DAmq#KM}D(@o65 z7_WBNqyB&%Q-Ob4^gB!a$55Rmntfve&N}DW)NW}gB#L<@e~CjKkI=$XsM`oBkL&j~ zfXU5#d4VjQlaBV%>1iG%t*wX&G&1|0T0Pch7M;eKi=6tO$piBA6oR$EzrBhv&IQ*~ zmSO3Gk>96m1aK=GoFUCq%64{zsSTku0#DAnjGAGxcw>wx0_8pG=u3VZ_o|lU2S0u! zfrv=PVRxW7f()ED!IwUPLn)@GF&l6UX{mB9frsQV;Eh;82a_+8hQB7a##Bp7OQ)H4nu;<(GU7nt;T;;O`}GGi98OG^O;sLw*}A+J>oP6E zhXh31gFyrqUJ2C0euKpf6-)ESvCVcr1xFdc((o}Gfj|s|WW3Z3+?swO($p26E(ow9 zCo8LUv;eHECtP&TyXI6us(m*CKU+hIR4)7qjeJ=DD3FUlrWP8;vj$F)9FNsnO}t8e zI4OsMta$l>>{T)p{8kS^955_EKw|&*?+=;3bej3sFzQZhzJ32b50tS5M4m}~kp*LC zacN1;&RD|z*om5++S8+g-^V9d{Hm&}K}>W!VePKaWe3-v^3nxf*i zt%BIM!P3@NfW)9MWv}?f|1rE9pYSCF6*1~AmC1$)G zwEVL5-V^PT%U(54m#0-M1>D+`O-9Z$y(&}yFC>_+sdnhGYqoU9NPXc@`FsPp4!)P% zTyWN^T3Y4+qWWtX@GC$sMzFG`0ieYm$eCe3QBl#9#hc5cm8!xnD}514Vp-+m?sBl|?qUcC+gw+CB&Se4rngCn`7zA1r9Z5 z=Q5~&u!)^G(ucEUZ}+r$!_R#zwM1J$j5P@K%IjSuN+KWdjm^tbfH?#EF%MK*QE~Cs z&(}2BwY7Tqk$9)6f1Q5-+#6%wSPZz<_|b*DQh%&W)EgAQ(Mo(yf#@muX}+KPigioE z)TRk;=?n`E{v;&pt`dy+0CPli5#FO{zS-wEHTOvDd(HN+B`P`e7w>Sev02R%HVb1; zsm-99>?yXXgza7eA5hq_|iUAXM(5> zK!S+W3Uuy&HCyUplhf%xh4ECO;WzGqUJM7z1bERl(L_y2S72(GA}3@XGf*E zKiQTjlVP)yq#_${v8EycQTA*_oMtWXIabBY_Ae%7{7nNewE6u7W@D(y&=5 z?-Ta!TTC-A}(_G}ZxLb)-SAsN2xfsK&l}K-@htFV|Wb1}cJ_ zjgCI~5MI3a0z;N{>C^PXhYx9yBvt%$baMEqHX)r78i(b;6mm+21jzN=G4c4RA)NTe zlJn8CkXwz90d>mX32daR9%C;bVN2|~1^>o%kZ#1HHfB2_HXp!KzPxFh+jubA1(-f6 z^+6yhGN2|wTS=+Q_5P_a_C3YG>H*zrP8r_R`6lPs)7j?dM*w*A z42Hvt%L2d5FORsiMY8!{B$FVHcNup>r2;svz)}O51UsS!(qbFi=oFg-r_R%#4t6&3 z@w^d^ez6p%mEf@=i0np-oaQ8NRY)y`5RhoG+xs~0((-c9i%gjo*1sK~$src!a_uQ{ z9(n~e3k(3iDt{T=W^Z;K7y$PUga0cveA*01NbrXZiE(n$MzuX zfBPghL>d4SINunMfRYIV+K$;uRlFmY0q^xzS1*6W3f_fC^iu!+*3$FCL?Asi;PEy3 zw)ZAag^3Ie4OQWH2)NmCvjX-0l@kCnpu!O=Q^onxaQQ%fmG3YKGzE|szL#SK zzLmaXsocN+9REa8P3SYfwAYE~ z8i;&P95%F^wFjjm?Lc*pP-P-Y4~pbV-oWg`qslOBH{esbB9m;*tq3M4-C z4Pr~K_rqKzfhdLizf$~h9h82=kMf{=#8;5uv)Y#6AVN~9$AGJQcGd(&1>D?>$Gz79 z$0|ci3e^-)hRmVODfmwDrt_-AXH-^d;Thh33#$GCFxTg~PGTAFxw%z>ogpPE1zi@Q zR%&UOZS_)O=bv74EwSj&e$jM}RuJT2haUp86bwnv=jUhn?%cUnVqjp9h}Tl{)f2e< z?bV3_$Y~vgq!@vIgKHCuot}Alx$FYCFT%y36{gqIhlG_;}u`WqY z1eO6cS2tOk*|&oO&zZS7X%WSmP^z{msgY)j(r*@}O;nnO21aTRYYG8X!Z{>Je|cC6 zP8Ke_CWieQP;mtP!>fWY>!$6q)UFaxsU)=8=k^6Mc3$b7mqI3hiU*rzHIGF}t2sp1 za$z*p)KWn<=s9*jK!*I$q6?ONBt#U%R9t-8973f`5_gM*jjbQdA#j{SmU$vgE`L{i z-j4Try!|&OCzJ&!9Lvi@O6lSUA2cloK9)&| zfX{lyQOF4tp|Yhh_;4}-GQq;tRpQYA!~_MMtJSo%$={Cho%4;5`+`75w zn67pe2nI?W?m9ZdD0*zwGF6XJ7wvZaS45ncGf1^j8y6L6Ygzik*U{|V9sW;^!Usws8KcTR_FfMh_vnSIz*5nUUZCv}xIH zYLY_Ir*Pilq4%Eh{C&-zG^jWk;nn)1I)*gdm)`&UGVBfx^Yzg7pr$tycm574Y=sawcygQpf;OPX$KgEk&i_4}=WFJa7Aq;F z@|QzFsqnt>ak0fmc%Tk;RYWQz0|1eFjv|Gq-tO;lesfns%wc$irgKeZJ(ZW1_VUDT zlK=w}d<;NFKi(PgZ6h~C!6MxtE@o{~Sc`%yNlw-g};s@*( z%r%>yQ@=VsT2oU~Qyz)T>4&%-W@L6j0S(${Ze}JXRB*uP`svsB4Dxo3zZX8|JLQLe zn70DGb^19(+GT(B2kJWN>(4N}*33V?HR25oV2(WDm5Tc)$6r4_Uy&xJq1UJP{{3Ol 
zCH#j#X13at=j7(HT1(jiE&&}2zgt3tSJvg<<3O7iJ}RkQ4M_8E!0Zq^&u#kg)>o5K})Li+?hnVs$ zU32rEkkIe>&Xp=uC&Jct6TOIcmy5f2y;|ELRy_6HXD_dR6?WG~(NT+kGK!)Mv+fdK zL^QqP`)4Tlzl3M^@Io~B+jlqNUlpq3^k>j@$^Y;Q{GOfU^X>od!@+z0Bi!=uf$~K% z{eLtMX?Um?EtBXel1g8&*LUQcm|*P`H$tYbo~gA_mu0IxH(w)IJSDcWndS7=vLcpy%! z#CmJ$hz){RvAr3I->{v+i-LCdLhBBzDsizum8}h#T7IsaTTFK7JU21{DMe8X6OaP* zAlhi*B&}E2Xv5oIt!8Yl@_+} zj{Nw@&!4o*Vb}kxPDg`LlBv#^ds%-TOMbn!z<4SDB-i+onZsK8g_RQl@hvjlxB&xIi_Qa^kXbO7FEs&i_+ju;&snDlx z%^Plg61S^KczUebKj^}>Kr?z8@jX|W=7}ST@sOx3n81a@b(;B%u9Y`J#Ddog5fW9j z=OWz{FJ8*pk1=)pwQI)~&vjpbO*b>JQ%uJI0JvGA%h9ehZZR0Da2#|AOymxkCd7Pc zAxJ$59vm}RbjFMd-}p6ODOgQ=T35l8=^Gk5$gVnC`A8!%u`HCD zXj>y5^YbL~>Z-%wwXw>>W{SV0_!FJo%h=gSoN)86gtxv?D>X1Aa|LwluK(Do z)iZgrkxfxYQc_@N)m%nyooDf#rOSPD%t?UZCOhqQSc5uq9WmD4S=*cULzjNY6|C!? z^5vBI>uCa{k9t^%o#%!Te82Oc)^ZK)k_BNbgzA%`Tka2=fZ-Jn=-geZT1bg2yl23evh+A{) z7ga~m>}xA5WTK6=#O7Jc`wMHop3FZ*de*l3qT=X<>+{1R=yIY!=btC>7c~43~ef~=x zlLk+PO9BS&S)`rOL;yBb$Nx>rhRGZ){ZdButS=S#S3~k49w~LZ%Z?q|)%g|9585BR z_HAVxpZi54E^-68zj!@E8i)oOBpqppg@9D@PSo*P zShthlnzzadbToB+oZ3StjtwG>LEdI=fN+QpBu0qWk+xC|-*(HHg^- zD$V!Y<13e|k_>GcaEhBW0@5zt>uX{9?u&wW@w4yxOR4H-2p{WNKjh4`q$k6L0_MI; zF1g6zDC4yTI%MdI4;_;oBDx0eCjgeA;b5=0ORU#4(JLKqYPkP=N#@wbR&FL0FmQ+A z+!@3*7W@-au^{EBJPYMO_$fz3n9g?o!EBG;+4JvAMcwy5G;h%~i>vhGTzLK>H#Y)p zkHM4f9Ncc|zBT|b^qa6zAlAOJBwV0)W8J56X z%X{Bs6x9ag@cU^ z6)MGVqmT#l@yOwR!I3u57!VTv>68@jUkr~-s#rOc&W6&UvCp5g3L5W@(`ztRUBQE@+%nCnhOy;VGXgV4bAx2vhc|li4Z_fPfA80?Uzr$#y>FVl4zu!JN7xCp0#mk-x7N7K zmp$BFeGZ=1Nn6kRsLg6Zu{YJLcx4Lbkn-k2sV)rHJqud;5>iUz+sa|NCE6UIIWeMF zzMo*!n05J#Osa!v=z(j0p7wo)u*#t!&;zM~L#xX8gQJC0GC}Hx)zXn=f zpTaLB5MY2IEPb%!(bzq4^1hJRL?jB-@!Q;Az9mmsF~7#^YbhX_3^!`)ZPZr=()9+` zYOk4mOv5onY|r)fWFdCKTL}e%(BmFR+Aod&S9f0?Pi4FPy-_F`s;3l1YAec=F|$&% zsbrpqLgsnz%tKKsp=1ad%1nkr*g`54nPrTf%(i*9*}Utf=bY!9-#NeG{e9kl-uvTu zKD6(B-`BOSwbu1r<8_Y?TeYZ7g#E*4{W~N;A3>@AQ06a74wc=|fxJ~vZY_k3Ny2!a z=QMbquHq~KqO`&G_13<0LG7rNk3p$d`SZ27Lo8~znlCIRb$rFg((RU_PZ(>}#L)U? z7m1~_`|SaxXQn41*sUq%r3jK|U79PNx+4uRkqfq7#AfpK_g!8M>2p7B8@qIl6>fx5 ze}~_iHo_Ka*+L=9Sv}z!m*7Hee*A;8@7KT4g`tT>6%kL0{T@2k^KQzwTt5HWQke@S zCkU2iG|?J4Hdznym^0-CpYEtLFpo0Igv}!T)^y+VoXLkTIW%SGAEU?K!HEThS|cB$ zPYAhYN=jgFa?M2zoVtG>Ht9i^#L4&b)lAZUSUK3?VCnL4pW>XX@T>rW7A>=EcUAz>C$unzjCtPA&L z{iccGr{!-lAh5B^ZB}U(fl^bT&0BeF$^m#fm91p??sE5)nKj8m6r!Q)2yfE9aMOo$ z&o7G`$L9EYnI!ww$ck+WRyCuZXF@7Kcx-G|k3)qa9FE$Ykrp2_{zL)Hn&^g=xu`;? 
zEgbJ>1!6OJ}9d=|g|R6*THt8K0E?C><{{duq%{L1 zh86l|(9kTdzcC%2PkNJl(ZXxB?&~>e)HBrgmZ!4~u|oRG(({|++Kkg}fRSHmX_Zek z=wm_GMJJB>EvOb)m-MX_xo$zg5!PwOl15|~QI2Ack!2;?`Ls%$ns8E)lqq zSRfl?iE-DkQoa%iG}Fm$gg1fT0bUU6v!cd-ecZ{hT#5kykmb>hQ`JAjx}8vFNII4^ z1b@KnJbq%=5+h%f_0p7PN9nZn;w5IQ23?D>Zxj6G=t-~w9F~@2zkadABx_-$+kT^Tk2ra^PxqutVZuDJlboAafg29?{BmWpumuV$13 zvY!hn(r+#rXLgl#94TSF2ISKEwrkVq4>=MjVs4HRn^sNLW$7}Pfz#yJ>F2PqbttPy zTf8%NB7a99b<>M`^U;qOE;csml zT9XNz8`BaCeV!hW1l^6$GL>?NsJu9wDf8lisYDjdy}y9 z;sSl96qk*E7f?TAzzCq^c(}Nb&cbm0*A9UcD;BmTt~1K;|73~Z-UieIu44(}cFT+- zZfJM4BU>f-@UV*qAe|+$uWv6WThfdDCv zf4CktSW3)!&hEnFjk;ogXZ4#EckmV`_%VTT6^R&E5TJ_fp3B{2c}wn!XOr$}Jz+J- z;$Tp2U~$Vr#q0s9<&W(Z{E&Qp+gMOr6lxs(IXj?t&h);$Uh#E6zVFIqygiy^rZ;9o zN#9u=o_DN3xr=UoqmGO-c;Z8OQ%jr%2wPx!v7dZR+2cSC9sc7=a}4PNW{Uz1X}&A|fG# z(spZ2P{R^#>%~jAT~CQ$K#c*KMApZwxV&5jDX^X{TQD}fTPbqwVgNW+>E9QE|BWF=p0>)a?h>(VI7HL4$xX@*hFcwv@o?_(I zzq8l5jyLA%D-j0F-Kyzmi}+^9a7Rqk(~^?<#^iBr5m3G4dct}zVmk>noRd#W@WEe` zC1c~9st;!{Fr&gCdt1=Rq{R#@pt{@&1#ov{3%OYAn4G_1Tw&C`csKgWYV$yFZ5%GA zr19a#z^$A4wk|2)Qn5TKM`ouVPcdvHc4-y)EH+)!?DZ8paqj8CkDL?otE2uDm=;+X z$0E2@V7NwVsiz2y88Hrbuos$YCQiM-1pAucbfbz4n_8oOFS4WId{-G428^Zuz)qhO zAx-Y+MN*aqPdBKXvagw1se2z`#F z$&xc5uAs(rxcVkU+#t!k0|NKSkoiQt9E!t} zu(02fF#t&pWb3gxSG>r*s@>3$dIt8tZr`ygfU&w2Zi(VtnAPfy4m2jjPNcTNtS@~7 zz@IMu6|}(PMf}-uI!y=-qYUe z^0Ydmj(coUXz%Hz-?>n=`*>|~fo0Gm#s^2K63&gEERB0hUl~{O>%4T)m5^%<_Tv8T zF1Lk5c~(ALr@L@7^KoX|$K5TRs`^|Sk16O1Q-0Epgtq+P_EutVIipSTAq!!02>b+| zj!R>wI;NNF=Xb`c3pv<3+7f)8vIxy8ktbYrJ!YSBc0V0x%VspAPT@Oth_n>cCZPX* zT3N(L0ag9gu;NllS8OofC&#BxpGx4yHJ&xbN#K+cpm7MZc0kC=5WjB*_(hO-i-hOW z>*-lIP+=5&CgrlrW37q=pEbLvj;JF?vngYwDQE>hUJWR;>vAY!v5j|V;mwdJ*z|K2 zv$dL`v%E8Z0%QG$Er~VZcASxZxq@V$(_GM*3z2Cbv*8PCONB3_gnE|=yiVS4wVg1# zuG)7or3-35M2|HnPR~leBa%ZoF&)>Gt!^8TaZ&lCMTP!b7HvDR_iWNqW`rmOng&Z- zL{@XKnm*Ta>iB|0@JBxJ=oO@QjcldCI#nGY!8z!qd9Ne_OeUAVoCqBS=`!~hXifNT zt$Fk2tGPyA^0dTx| z6n3-iuT$1DoYQ8{c~RkTj7iBSZZ)0%o0rK~(-;w7`z%xI%uG!^o!5imyN-HH%C8ZZ zdKN0`=3b_;wLY38b{AeL${(l1Um%M&G}59J#NFqTA$=pMW^G|Ci9`PWR>zfFtZg{2 z3$}(oy^7zqJn0b2?_^J;CXdn_TW}gV`LSJg!w8L0iWASSH%+3xBsTtQPrx%P7Yoa|Nik}# zB2(|y_|AZH2d4W@b)53_uB#ge+ZX;q@MWb@bbZ%shgsCbB#EfdCB811PTCUT@H2 zjXQVJ%f!lg>1d_C2-_EY_*`D)fL$4Dfs8;dV-rOHJ8)h0qZVh(cuP*fffO98+Zun# z&I?yPW1`!{Cf#SBR=@PMy75MdE@-iPPK4KgIg&;P8X9zbo+iBwvIp9{(Ms$syi%p|`f`|lZ5 zn$+DeBoruS5|!9wC0(PnYhokFR#d4Z0qNL0opsgy0>z(%;ewF@Zc+-xUk+W1bCGzQh;W96Z)+r!X?o__l;OBGHT60pQ)}uzZ*6bhCC!La;j67{EIcXGkMVvX=VTu^VCLE$)~=6vPm=Dw z%(r>kHZIeG`lt-aHq`Vi$&giI12I*Fs8;laykW%M3E^-kVN=)Bi-AMGsuXHI*VJSI zH%owsdMr(3K>jJ~bt_Y5g^Q!J-0BOTw2d_*KfQcUMI$VIR`s1SpVdrsxSLeBx@!O0 zk>ST|+3wFyip21r7F4Sf6f7{Azm<}smtubSu#(L5ytILChDzv><^;OkXbgOey>ap4 z69P9X2|E79{ak5QB9E;`JW^I_YWoKInPwDwXX?D?)#^oLo~1?8wvN1Q8k$)q`bKhw z^@u=QI%A1c(TJ+Isp}ZESyq*Pi+##}WZcqFE)R6pm)qYhISpM87<*9&v_0XFFL3O? 
zkW^>6mqW)C5(*)iSmO4?b2W8!ecKgiU4#4edDo?j7rBIl^1Gp_ju8nQWfGPp%{OE|KPD1@2i?sNtIt9mMde!GDcs&em$R)H~H%Li9#$1+q+i9+>IN3!oV(@Q|P%&icb*oDBueX zXX+Qr{Y?liKd~`Dmf|Q8i14$ z2M^Y%_cM$Ow&5xav+vru>n#LYP)$kE2b9iN=%gFV_~_nTE4>)1QhF_y*Es3by7VMN z5PqDF5$mwaRB^0EWz(QybCFJO?T6AOMz-{TH+RTtyHTTk9GxLYC879Q*s|$Z{qwW( zP~1lEHUVxCVY_bOl+^o6Yy%I^o>hYNK=Z~(6w138c2`tP9~m7rKz*x{Z8X!B_!v2v z*<7nj#@8Bf+A)aPWM&pF$!;W`$*s^EP&*~+`2$bLLFA==YMCbSnzw)Md8bLjK-NZ` zKg^;FRVjF`d`kaX&HWdmp}j@<*LU{JX^e@t56;(w zYVEV7yn4>3KuyhXAlzfouw~k7flb~zEGCDJ_T70W|HWq`%o)HX2#FM;s8FjMPkg)j&B07qSokIrfNc+;-Vr)Sg|h2lt@SaFhup2{&r=0SW@t%}oTx2&fQZ3I)I zCNC8~%oE*436_&CXQ+0cBl;_6@X3k$ihD+9T~zt8l!qQ0n;6;=J1RQPN-^1^+xg3> z#KAtI&C#L$==3>G@@8p9Y^8XT&Xpr4_sQkE?3Uh1W+ zLDURqYXLHO{CXFNfRwL7tvJve^daIvwXV9&^y1_l6dq2o+F^_ z#ydCZqS=aYa)Pf$<-7OzTWH?m`DRaHk@;bT(CLEB`VOm-jj8&`*KjtH_f9H~bcc$| z#>*olJJuNIjYahe!te}QU(TiComVh`&D5_0!&0+J3WhB``piw=tFOFQYaMR5-Z0)# zc#7;19PMWQtfoFz_)Owz4C(Ox(a!c^3KTY^NLeY|P$RjYih9)75EsG6F7RPA40Vi0 zH%)dx>1kEi@!SsgoL~v2+vT3BzS0YArl(_ZK}*c|!3@VUuAdGP=7CB!GczyWa)#P3 zg?t;PWKJwynYu(SoEhTm1{js#Q_$l|51N@v-286!H5S4sscUhgCMPWPfE)RFu z!MdAI(P?*Z6z9dh9h}y0k+9kLAsRfytQwI2?ghd0q*2D%{keNaSv*EFYLe5~=xTq& zi66AgT3Wv{+QY9#XKd7BHIJ*-LCKAr)6&nHcy-*ayTaHNx&%pj<4H^7=2%^NuQXSO zlnPR2vpH6hFxJ~My3cP0&J%gdgVcYZe#+CfpY`o3Qi1<+>j&;0z3c5lDtNd4ljgiP zy>NBmjveg%Tdt4~flRM)zOJPalg?k&)swzGJrN;_wG13V@1XKq;@EDNcnrlJ9(AT? zNR2&9Gg>sZy6W>G!+j{M=XO_1RKd?|@)~@f`^EzNV}QG2^&!L>*q2GEBm@+B!#qIURgoo0q=vV%;<2;dUa!ou2uu7c4Ro5y^+ zP)*d^L-4a7A*nn4(kM#)LS9*;a8AAEIf*`};6rH*D#lj%T> z&FAHE3oHiZ$B`rat5xeB=x)znok?V!^EnGwf@}h z-#MxgAH!l^YTz+4-xy()KlUBWg&lE?6dxAW=dWyNZDkGxL^Pf`gA7@kO~r6zK3XUk z2F!&1J>a}NVjX{B-%v{jOz4QXn#z?ImdJRHZJp)@?-Ui19s)gvhU z-feRFag%YIKH2=ITfU@ok!{!A=+l7MSjm7`jt@5r)93u1lj=}cQXui!KunJLr=Gm) z$JIV*9S^<7d=y8q`0{yo)u$~3zZTf7tR7AEbd0`5p{O~2?4juM_4vNICjXd^N+6<^ zv9`qCkMBd_{i%TYGS6clM00bktqb~w)EY_}Mu^kJ^2`epR-gb;{{JMVTQuZ&IY`Vc z+{kY%SkB7DM%AZY#ma~k?k_AP<-NYb$rcLme2p#PAbajvDw^@yO8WtG*IRF5@{-z8^ zBvU7POT80jSzQU)E9qQRXJ|}*vmxq}-Fl5|fjvPcPRd;ka*3?JEtjWtsi!jCjs5`^ zqj6pdT12zx6sL&0freQ!fF<1bCmb<4I_W%t@+uW;IgyReU(%g8jxV-G-MWw^w>3(R zYjNU9BA(0Er2F)zvtNf7rYa<`i@!4V-CQG0LvB`5efzP_@Ikr{k=GJ(I5RTBlG6y@ z+NZ2-g5BsWtGd_MwfyVnd*j2C#gi1XK1@a?6`%WZfKotmo0;UIoIBb^H@)T_PI(N_ zFOD|L=k_`U@}0ve`EJfBUx55fFrOZ+dvPi%(yT|fmwNsikKNu;A*s z+|H;F(gxdc%b-($Nx#)4pK`QFx7Y!c#Qtrym#A*X!`F`7JQ~E`&R-p>y}n%D@oP4~ z1m(8`XRIM_$LdVVZf4kmqQ7(rZ?)oB_B=8*Xl+RZ&x4YC{#z0YdZM7_f7;o~MmJi} zPPEKvByQaN%^|~s7vz99c3Up(IQHfBoJZVS9dvGo6&9Mjz~!=1(_wt3H2reIQOVR& zTTwe`4sg5i679kGlb@@Bt=ju*mEIjeHI=N-T_L8Pvb%=4<`3!)uIC?4DTZ*tH@_2n zvOrfaX4&LU&b)vrj{VijZ(O`+mmS}W*R(pJFscw+zwGAeKQlNwDjG_`ga;+mzMUJb zHw67+7#+G^b;zt;k-c)IZQB05Y6s)t7H$_~X>2-rto4Lqx6~3m>!4kCva4YZb!d^; zx8pmAQYZMzb;1@#?%ov*C4QEY5D6jk$2Ci5D89!Jng-NUFNeK7!hpDXdSo}>H(RLvnjHnl0%Bgm zWLy+N+uviJvD7mkIGh3IW>|8YX4XPZ-FM5JPZ1Fw9Yb?*?7uBefj%}rCKC}nvc@RC zHw@X&B6v&;S^4BA@bu!dfB{ktmfo;Z6_AtaEkWu76>SxG5eU9IKU zEM0EtHP=vf!o<8-x2>KtsQ{)!h%2qvjeOdi$g$UNMt)nidNm*cA8PBN+Ah1TO`{j` z0b*o*%Wo_)0j1NwT;`2~DmWOcV#7EnHY+dN z*SFcE(i?wFog9hzgPm~f8#Pn>>(&IPlrJ|iPdkWG9ox}7rs}!*c`zlvS6q)yyN^O0 z5=8m}`1+cgnOQSxP0b~!=RyB!YdH1* zS~(zop(jrgk(%hUBNtMyU!LhZW546poKO}H$P$Gc}#bmn;1xzknOH+p? 
z6;REyOz^FsW;lWX0Tfnz`)L9Xp*w@|$UHH(!U*bZAjg&bn2+w{Ok7`C#Kw4*LR9l4 zf)kUJQ6Jg1IXj>jx(|c145{C>g3i{^g@)&#@^zyirf-KT87j-gz4i2cX8aS&saXc} zDDQ@DBdiN;Q0D~1!$JQRNXQ4XPiQn3Lwjq_1p$;Lc>cDCcNXe$5f6qpsQ@ayp)rvG z0@9A5b%z!u%->u#%55FJiQJedxdv|FMaX{1@ZH?N%Fv(^#s9&yH?;0c)(uNO(9^MV z$Q}6tFWf5)}gk;)&6Dc#510v`Ypaz745M1qH%oGau>3o09^Y%_&eOO54qFa=+tCc&aLj-P!`}|4q^3d+;Mc8%W!?TR(+;TI4F|^YqoBgJUKQh(k?Gs| z9H196>!ycL+W@bJYTv1fO&|Ir=bE-Y)^8tcELJmnll+BMATEcjj!iocvO04^pMk?^ zO_83a&CSht=pT%Q@|DA21fZCiC{{Pbr$Eg{j3OwaE(bId?X&J)m& z8fx!hg`{Uc?%*6fsR3A-#RG7hb}7_85u~jVOx?SBgoaqT0lqM;C9{@ zs(5B@-aAw!D754G9V*<=&q{E?Sl#w&fowv|=@+uHKRaw2521$7Z)vSWeGt$HD1d|Z z2N@uW5($B{u-!QSYaA#u@E07&z0K0SE#U44w&%pah_Et@dtsyfzeIod2&BRv2#gRA zx|1JZW(E`ETp$zA`iu1JMekH)HFcb)n%c9POP3T*gR4@T{_v}Z_wK>&w6Fk~H>-~i z;{}et3d*83Ik`BN4v{!WL1Y8RUVmVx(M4+`|wB+DLQ_7DH#pEGeiwL|G`Wn z#UQtA_E$F`FpZzd7YI5L;w7>@r^(F0EDs>#j?O#0l zF?CFn8jP2}fRXWb9<89$vRq>LJs&hY7Dg*^R9K1}5$vfK)BTf=?j)fxk?{xp&zx7M zt^BBd-o2NQk^k&j4>U{JATJW~EnIJ7P#*()LG3Ou{UoPplPhFDj|!sHGu9Jd5UEEg z4QxiQog{`!>IlGaEDr(^4N>nLQaI~KV1JCuCt`wiHZx}R#YZjppoP} zf8$4_BgFPfqMF8FM!|d(a$;~c3i1GfKR9&UVwhOZU?dE5tsFT~AER+3p`1U!dyXS;9?+ZVNpkWG- z_jA#acqX26fDZ=UMmeOX9l5rRJzlno89WiYyfRxaEs;e4>uL7Qn{M}+L>ZUT*cUcxUS9qJta5I@&4_?KpmZV+ z?N9pr{1B$)Rff8xa7PMxZLQ%>NW;z#@3)SK2XAH|(;gHPEz54Cx)m+~&Z!?4bz;S= zw6bbGZ^sR)f>~a!H18Vk$(^iIojdC8!Aa!I2G^UkIpt9`RgcRFzIv}QMae9NkEUa6 z#`MxNGTZr6r4zxA$m`|)5ItW;^TF7&F1zZ&*<{+OoGC|X@?(~pLdxC^PMmWRARUsmQ%)E zwfwxRkIEz&yxS6q0}ypxOdHXrE@6Que$df^(1R`yoZ%phKXgn-uQcd&%Y5O|cX5uuy4?yOkjqJ7nMa%4+$?4eVhG7jLzqhnwaPca_&%7?Va^ zjlHgI6n4##lH5VA)2%_FJgVRrtxmbN$MV~|i6T}ncf`RGK>rVJnNJJQQ=y!ET!hO$ z@BXPS)!7t79qy+0(?)F`X-(Ik=96$3YpnOZT2Jti zw}5|*I)pvDEhx0ju73s`@oY`cfAy|~!(E7D`Uk!KU0pVKbJGwQSg?#f_q%>A<`c{@ zVSO6|HdomOG8}Ds9kkc()`O?F^tDW)_)F)s{=w)mUZefyIdD2!TxehGXvuYVMw%ey zlAOJ2KAS{lG)8vKP)p93@`$WnhM0B^62AHJV=q0tX9u09J>>}IkYufEnsSx5t0=y7 zq9Zz#w6S)FGt|mp{NmK3^IzsPiib{jX34KH3ZGLs)p(_21|6#qtYZD{p_XU$yY?r< zrVTt<)O7!^?gbeLRCF7Uwtxrua;D9W+YsAf5jtDam6ujFO1>dc z-a+t3WaJ?`_l=2kzv{v5cqT>QOBj}4H0a)|m#;-90!0>9)2goWT}=>my?>J6bD~1{ zI9j;KfdF1_y)}c@`iqd2pb9Y%B)k@yu;HHMQc`A0Q$=s}rXBIYWBdmKsTbIi@s4Re z+@8{#3MN4tGp~WHk|{nIj^;NAO^QHR2`2UPJ$NtWiYnt{<@Ud#zND;{R9cnX9@R_v(u9bIES=4{!4oXE zf*i1Y4Ea?y4g753XoA{w!=auq2p|WZ>Y|kBQ}18e87u2U8lj(RfQX#hGkeV9WUua% zHPt2}4PFTbv$5dS8&~5hyOHFly~}KKgx%Y8MRzAG#ME8b5j$ zO_ZEz6v}?9qlV^~N@rKUDz<|Q#W{Vi`x?_J|~j66r;uK3+W$@rX6KV6aUXT2ay^@vsE z!dZEK&9+2K2s>o8)`+HTT>DzLZ-4g+Uw2wQk>>6sgnxOxXSBn*531Qi)Tt<-EF~Ms zi1l;4RJ=>L|9NSSGAXf`18_+}BXD53!3#w_SA>FYbLTHNk9-0Dvh!rWR`A>cGi}orW^2Xp? 
z{Y03P&5O#pF)gvZaFpiGc79Xd~ORFJ6dQMg9ugR{-&hFo_DEW1Wb6<{rNO zu~QX^*}-#n_n&l(2UVq!$E)&H{nXpfjH#ONYnYx?AaS{J;@g|6Y^-voiK%nm!6 zgS$~or)eu6@10<>?K2JCiSt1Tzzp$a1Bz!B7qt~NSBg};)+jdyS15qKgIx=~^=9Po6(k$08`5|n2J2NPO*cZ)2izk`&2t7-|?U6kL%{IOHztUa+>`2i)vdPE`GR~Xn{<3wf!oxN5~M|hO`vP%NCLaY<}~9j5)sv( z3^9lt) zjj+}a+s-@CU*?vES%t2HzG-K1 zdsCcaxhvyd?-LNO0s0^RSkEM2X;RR;*L(|i!r$s|ulZI-91r5WWRyE^z>52Q z3oYhf`M==9Cc(d3#{Uh8{)=GzoiYAGnEy&m{If?BMv!>P$Ng3)BqFjH{&_%Q!Qua{ z7Wj)GA&QN~@K5>(*T^;&)Pm2vK<8i(TAcp%pPEr`_-`oyux|fapZ%|APbF^o+fzc( zIKC)>3{8-I{_Ma1!OeLE5!s7=vs>RSngrJ3;;s=^c7Dva8n6H4r!u;ch|_E2d{xgV v<@&RPnf8kEdG7!0O=pc2hv(aS$5v%=W{bi1J?%u$EOhx2}`{FZvtV-$TZ=LejXPWjm0!VVT5VBf#>^}x-D9L_=+@j@| z4c21oXwoXSAf9;nO3w=S*TrRexbj22dxFQ=db5?Q{dr%fr}vc~=6ICO0=tKj%_<4B+A6V6zSPbC=z5?v_SBALr z$_2MHo-aE2+NM%<{=t0%KbyfVu2OT_KuGnVrPt!@8fbrS))-hF>G8fno)~0jtb7xk zwzRB|e|B=lCFK(FH00~}=0toAVuU5AOS9&}BjQ0A5sovX>{Fo{LM4&R1^5Sj+>Vh> z;mnmJI+@S0q@N>Y7GCrSQM^N_2s!CaFg$w z($L~*nfz=QC;tCFm)8@#?+m@admy}rpSkeAn*IA;*>nn@+2uTN;gkn}JseLHZ zXrulo7x|apeCsOs&@npoh2A;hf9mbOAHiFn0Y8Rp9G~jXUhEimcDu^56asz+bjLJW zB3RUY-K*tzuRrc}qn-u`Nn^4*aw>Yc&phNqLZ6b{fP?|Ng25|q)LxS_Pu3e<_vXXv>?LO*)4TLr^XM7g5N{69aL{J$zn(LzhOzvJ8pJSJv}BhM!WJG{hd|Cy`$P#MY~N5V-U*&M z(TD7j{T-nHF*x7gM6v>y3=M(YP-uyrz5TSCg+-x_$TVhkWM5%;ANx*{n|uG*x8jIsMKt=&~sU)U)3=OnO7>6bSN?>!?!$RsBx zzl=)QX+nMcWY&41m`$>QzCtBdRXLAa;MYy+lg8iXEjIYGm^4X2V1CpSQ@DnYk1tfZ z!9Lg7aVQ9dLitkC(!OO0*cmm~5+4`yMk2oS&%PFNYB_V$>Wi;jUR{;>*u=WW0^aFI zrE&X4bGO%wnElD`vcRCZ1OD;D=Yzsd2+iizrI{<<(?5|ekrXUvFFd63S@F=6vCJZl zXDfpqd{z#=m2%CVTUu#yjt%Z>)P-|FULFk)*X{VNDRdb!a%|D3L7G>{qjTN(u<2+# z&HyE7k9}@!eP6gW%l!9`iq+Dw+UPPZw4AL=#70+d(X%g%4`0d;1WIGDsRusNqax|om>q8-+f$C zLi2)>@gaM-DYG}&X7(~Zmjm9=LY2wTAZXPyKp6aSWT>4Mof+BqJOWGsNtS zBtmn$#K%1nk3_q)|MoD#ygy?T4nC3(AW<`dF8Pgxg-tr9rVIGvb%~zWZG?HZ8&4A? z`(u9GXgm$_{fk>%-15!3xRL=GqGv$0&<1-(IsXmvg_CoR$@Pz!Ueong_U_@-)YK6( z&gMB!Tf9ZXBNgY-=s9mO0f9ei*LE2iNP7Cb-i!T?H+@sOac7R-dN=FMpFcBJ#mLaw zt`6c zd*gHU=XPCYGSJU8jF4*#6++mM{`BMTkH#%d${lBX@34a}D0Gbg?13;t!|Sk!h*@d- z?)yWP1TWmnshk{!M|{&?K*e7aH{Hk)@n3F8Y-$pBxZH%SfQ=c*B4)u+$*7#;HE5$Q z)2ZmF&LfiuqN1CeHF>0P_`;CTCN!i>Nj_|0G94p2)W;~`N~VyY+%=$++FVz=yVAe( z1@Y-iH7cX=<;^xppXn7;QQJ9PnNI66#`6sC?VpK4?##38fdHB;v{h@J(S};ww*3ym z_~@u2z7pzp{vyH7QQ2Mfwm1Vh+D4iR`nf|}9!y<7eh4(h9?rA<`uJNH;wqIkPPDvfH2YfrTn5I5S z0n4i^?B)x&z(ge!{PU9c$5AV>fIu}B(OSvcoqRGpoi~&EPh*Wj{0@cC3xBw>dp;G! 
ztPhDBiXG0>f6;9KGz-X2Rr@3-#m?oNB*K3ib`+S8%kZHBsF7guO+JHrNyCMKrGuF^}5Zh8`u zQUCyJh9H}cvD*sY)V7rGVy@Ue4sb3#v*g`Uf@d17+<)EO3r@H2!Br_!wxDe`wkJE% zB4}ne@RNg9Uh~wH6l$K)eaAG*-gD;%O#)9<;7&X(>%p%yO?6fK+uW%cY8m9RDt{J* zUdO6Glc@8=UfFssmIRQG_}ueZZ+3Fg(I;UITGYF^Y_s95x7(U54LHAOK}cTyMgX!f zvxji5>9}mEN|?XDj-sxRj;39IzhKyE5Z|}FTC#nSn zE*2K|e%F-)W5)Un58b`aa`eq>%h63#_>hA$yD{WVL;dzmG)!@GvFyj-=$?M=;8n5nbAMA3gf2^}=!_U^-qy zBmKNj@sFt^#ooRvvy{vfN^L3?Pj1HVpc^!4a+Dca?HGFa;b&x{SoW<=n48cfTaD9b z>edz$t4S@JuHQwp)fC*bEM2vLD29k{+#Z0x4k?S0pV7M&LaIwiH*dZlP zA|K>R`>HZYe;xX~3+Y!mG*=`?9%DAe z_6;33cq|I3wX|P|1Z}gP@(>`|$H&LH?Cf}ifMi5=wYe5;;Wz&;D>jBS_hv(lug(bk ztK6EG;&8#|%)N5J6R^i9xyQbCwZ9HfQ-fj0+uWkT7Edc%pz~m~I=Gt*v$oCZS~_bl zC6|oyJ6_1Ah5X(=0;=Zwt!~~rBrP|pyS8Myc|zO}@SQ5b&MCgftOR_C{~iA7q?&gj zkX)~{6URsq`@p&4Cc0Af1kLsg*SVd0S{bUG%8K!~PxX&KE4zVCkq@e)!Szy=!SKcX z(x(tMm&*%1U2aRkE@5W@=UWd!sYA&o{-KF%?`9+=Yw!<-lp=7!qrlV2U0mF^JKmYn z%xj|9BPTrCsetlIH>-KDk?BC)^N!qFAW|CgvDHL{CyuQj;e?le)u9Dt&Gf9eg!Anqx>v zXuOq#l$5U+8D87Ph%{47VCC61;kR6UX%{4dv-j-h)NJ?Y@>e zG_%?++5%VfGiDLn=Y}309?Q`*T(;Wa62C}>CZwgM9h3LiJQ?^EXNPfqY$-RfQI{p| z;rqsv`8FsW*kxp{-be*~JHH$$cnXeAR=S;S)y`X~tJzk+WAjowNC3ZcfRb{5P@h`+ z)wV6TQcD*Wsh6HEMDFKlqaUA{&Yva9MN$MXJ4R)`uikZOV`T5(@jugWdr8=}+1c5t z37WuyCca*4ryMCiOKk6&tT?DTIu0eE(7e>E1I?dZf>uLzX~Hf^>#kqIHyw7&hL|fSJl4n7p0t&O zWC}QAzkkn_;~{1wsF114s$_4 zwoG%#r{v)8bX{Ft{oPf+evGlOSZi;)>>Ugxm}7hgst@;#2{{j&#|syyHKP8z0LA8< z!Tu2X%6_gy^Wf~$!_k_Sb$X%j5av77G&DQ02|LnxrKRkgpa3!R9>9R|`?(yrnwy!K zRj^gNM8(Hz7?v7C9|(Td(#HkU&^=A)fGTIzm{IAa-z-u*rpI2GDR1v}%r~Tj=O%`A zye?_Se4gL~s1KRf*VhS{;xt+%Kw&@}cJS!^P&M7tXGIvyFz4!U)>=W6g3jxh7?;yM zi-XOdnjTOfczeb#o0kVGzze2vGhKP_l3+JBuJ4J76@&;h`CXiKycGx@_draLeq;;9?5L~qFF37{+RagUomG<-&K17eXpfD)7o zh@yk-rY_JvXK2XR>Z+=H3zsG~X{Y)qt86OIG|$8f9o51JV}1z(%_fFsCzVEZH*G=$ z5%7;frlbITAQV92I3tJzloBSRYQgiAnL1(nt^8yQ_y8|pLoVb8qj#2uVwB9-p> z^MS-6HAG%C_%C291GD^btI-R1`E#NKO2qH1>^4hbP=qzYn|}b*D?!!0Jyuz%VYdPP zs_>z^<%5(mc;2KPzEF!d6LoLI$q*ySFHEOQVwZr2`N=zW!cp(Ts04v>z{Gx~=z{D6 zwQODTr}viOZ{Ub7&Y|BoQNW#o7RHX9Fe-IB@kGB_y!qiQu}Pe$8_sI)UBA$Scc<|U zugFT?a|GVNqMkL8VQ`@<>VR6)A1EDgl?Xo?Exx;lLx$KBZ|#~;Q#mCImV|WQG$eG? 
zrwd+xVQ+=wEU)5I)_a&}2Hvj{ehWoztAtxcI3dU|{GdFKG7&z37bt!2(wApA<2>*uuKHG zEMIDnzIi?PBv1=4K%ErA3fCPV&<(B2WztQf4#b(~gmkmHSG-Q%eMbQKCKkTsUKH7< z4E3^+M5>27$ibqt0bXS&+YA09Z9whA<(la@ARQuPzqPtPfy2OCFkNOBlK)`X8Bdbi?*INbCwf4;D&?>;ASRlICkxF;Q+B+U8AD}VxyH%W&()FNxx++R*_;ep7}bTpnQ?i~XapLSOBB&WURh};JZC@(*loycmt=tIX^ig+XXhcM21e_x34ob z=*zNf=IG+nMw^ckcZ{R1C<%AlsMw#FN-g7qd-cy%qC;@29EOmKP>J`91GB1OQ z6bHQ344*6$6Cf$~>#H27K6wDMVL~r)K5&#D@8~ZL>DQSxZ+E&ty8EP`54M|Tt!3jo zGjzxo;A_0cznIKy-%G2y1q%bxZ`fsVn6yfH_Z6bj;Ag*lVKnx3h3`#TQI6q@`bdK3 zW`0lkPa(t30g0876;$l}(wtKxG#b3Y>RR+5MP4BFIP>J$-aRvLA4O=d4DQ zdvGKIAJ8N-xa#dV6lh~b2_Gf0P{;yNwmqxXZJBq%bjdav%gS1BH?0;VYXC1uAcekm z?X><4`SWrYckLlz81fAd1o?Js-iqrhDt7XTsyaWs4tuAdxv;}~DRWcXIlnZ+sQ#cv z`@blHCl(wRoc1`fD(`UT1#7nnqCNHbxHf}@UT#@_9uzbL65+Fi9~6lGWQ!bcKq1`^ z0b?SFZy7Q?1MC~lB`wO+F~SkQ+=|<}_b4EPJOFl#4>*xR;{Dv(9XP|haXQR_OUmGc za4M1eywMpQocKK#0$^S%(jFxB&B`)44@5?Qaa5~s)Ii)JV5uSTS0+HlwVb6ch5ZzK5*PBZ8CWHo307=;T!s=LV-8U1}6tJdgA0z1|K6vL>_TUwn&Y)@!q|0>sEWCnj* zRH|Pr!1`!Z<4zb)um|9X?_89;=T9^BlQJJmW|Hxm(J|E5T_Vwv*@-In7ze>uz`cKT zX`m-poV=Ixy1K-M43Tx?IY!#V@3$R*m!DjC4>d<_cfbDF+JsMoswvA9%NmygXFWK& z?3Zy-eoTnNfC6@*!S}43P%GgftmKOu z;jt=O_?I|vl>(P8>A9TY3}e9i@LWajV^gXLC{QC9P;H7k?~o2(*MNaAs-HZL9z?XxT!>2Ank#5HSMoTY**O#&o#(fYAJI#1dOt8iq&oDdGc^OI%P44?9w z&HL)z39lG!lQYR;AefGM^5pkqFP5Z(20n1RqSxAT(;cvLS>c-w1r$_zylROE$Pj!a z-BsiSByu9m9gnA6Ly_Q8XWAOoBISOp1dUuXRIFNHLZ8BR6O0P>`ePqj>W z@2b&2I^hOO$`Ei_=tcfHDsCPBdWtziWpH(Q3TnOcgoE;(3*S?iq$axtzc7C!cq9qT zDMMw({5DNM5&1kgtO7e$X$S-ke%GdMs{ee3ZGWtCYFyC^sRM&F1Rym47)VVbtj148 z!UstSaS(X(4O{wTw85fX4tW5X!5!J*nnMC@LBlD`IbfdzxAiq(A7Qg@=uUbeHtp#zmVF})wif)c)>B%7jxQq26VI6Q5{ z)WE1G2BaenJuXpHom|MXUpov9YW9Nd-@>SP`7&7UfNN?cuBh30P=tdZZRGYGP)-ZG zc=~ug4#%YZ`-KyZJsqsp&eG489LgzU&jx@C0Zn=R{eHXM%;3Wrv5__NTlSBOO`pMI za%h6pfb8#0m$Ii-*b8H)-TffLgdMhW!cktF2IbXCv6|25bGl%vaT(H}%lXHGT(BsA zpv&Q^oWNODMeJd)C2AQbg>b}viH&eKGkH&{qR*9S{|T_4#X3o}VJNwNPzp(`dgow2 zNStPx)sNBc@=ZVrnqa8?+1=}LPY^1nuO`gJ;!ghxZbSvC05ti@5(i8&iO%H5ua&E2 zN2cE4bZk{2O@E#~!`Qa=GQ`@;cyZc+QNDvi1r5XGAL^=IMm-pWk}{$k`WTbrs%As& zFlH+*s z^qQLBkiJ1(8Eq(PbW|NurO}onc(@+X5}TN47RCAu44O96p{nXm!xbes)BH_*7VZac1Mx(V?J-}; zz1w(bTk+ud3d}Gt@97kYBrtuyeXwhb5SxE&^b0T708+3?F>eFuzFpDB@$Ckd30VQ1 zmg*^B>X3%6So6hJu1tC8;HBsFWtdfIwieoznQ{gX!UrM`84{-Z7#%lKX_(F!WnUNJ z`JIkcvH}WX*mDnldyhQd>!!THJGJ^p?B4{O4carG<80Unu;D1B!F>;e6`gXd9u!v0CuA?-Bsgh31QoVKq@JujUFbCIw8(4`(OmGI+S&r3ntO+I^h89Kvn8zgWz#G z!Qgd>MO27Oe`?u%s=GrWDXVhY$Z8HBd=(HOK`iSN+P8T%9D2wT3sVuBWb32@@{_F? 
zQMRXFRF7rgDtN)%knUFBSY;m4qmC?`J0yp4o)J7R6@4DO+&b}`nO{);O#*Q|G-Ig; zlxSctz3pqh6tDg$6q{jOl5}dyB*2Z_q?L95zV|z-k9@(QpbMP%Iql?R^mU9U_Bz?lXE9x< z3g-IplnWY!?jONbo>5d0L)gJ}(iF@*^>pTmc4UY+e*6Ag^ttCW!Tg9u*(5`;@;J0 zipoyV4yE17v}GbO5dvFt zn2deU1Iz&S8R z1bJjELvb(jUELdn`W`3_KbRiE`|y*#)eIr5I(>yel`}((BzG zQbR-T&H@?wnf}bc^e>NHHS))m(eEl9tsXqm(Dc*&X5y`@()d)Hh5shbjpsq6Q+cpxO70ZgM1n&NAyPr&;U1&JdUb|&gAvC z9yKg1Q2>mnpf=FX=}qX0`E@QhDna(lIY&(4#yOpc(#i66rCz+DGW(~J&@@btF_{+* zb+Wx9I|Zs>-uK;9`E=vu!(D?f?EeJ-vJ+DM^M+ z%C}b6!(mza=&io~(Obok_x~FdRkvfO9GlR0UUq&w^O`teCXel0WV4iwWMS4|rj99$hv7pgRzyuq2ao=>;@(y1py#0V=^YTl=_x4+M(VMr+y zhh08z`JwJe=y**8cc|w)R;w2BDz9Nh*W|*9hUENSor7y^ch1p6->wU#^4ziZ4-v+Tix2Z5T;pYQUG*WF-0(sZ^# z<{*Zhuj%u1vd^lVHM68w+P3F4!lOtbr z$e8@CUi$kVGnQ`b>TGhk{+d>Y0ME6x-;K0i$ySnIQ`g^qZ-qsu#$D&0iB(RJ07Eq;*nFB02jT%D6a{}oF+a-Ckl=zl)@VYu!qXAJ*Nt_|jsSlR5- zH8us=$rl8*cVAkQ$3{pL__{vD)Iu*hVbq66j{-@E$;n#-zzT{M-#4T`r|QiHe{rk6 z6DOZ?^<3JgDPmy*KbQ|_ZIE(-P;|V&cV_p^5)3>9Z4r4!O1)WBSe>&@5b+OKi*Ry< zTrvqVp(lFpL%arZQORJVR%M&X+fqR1C)_1C~4vxC?6CSyw-bnuUd@UfK5 z;Bi&ONAtz+pRF5^D?I&-=OrtvuNrf@zi8yOd!M}Fx^gmu2UTuOUp@huse4}EHCqNr zTAcLiIQoA#04t}t^^>ii&wCeDRnxs!P^0QR=?NgG zs#=+0`8~602~5zVK~f9?D&%n&YR79}%%4>X{zOpHm_B_R>jx z^PHUeZKz~a3g6Y?{`54ZNb(vy*$UHEZFULYrBk&$1;ZiC+xs%AyR zhLf&iEwhhh5^#abj8jQ)urxG`;@d5yDFNM!PUDR&zs_zye!pS=Ym*?=zOAT9xp{R1 zBzi8Vhv1|%~-wnON)XEz`ORI(4`}WA}*U{18 zKkefWCn-X2oJV7qU$^GKxGe|yKAInFlv!m5w9Pu9OuH0tWn1U?6-g+?QCqWlT)+pv zglup7&w^mrtLA~=-VGRtINi&Mx6a+wTl>;@jH5rAhoAgXm;dozhg(qURV*a~=eW)Wg zXEV4f7!g6F^ek=x8LD<|*By@w?SYT>+ewg7Z*m>IeQ*>=z0_Njt>1hZ)auJ_7kV;gX%et49)N0T&Ku7GeV?TDd^&bEz)_(+u-cDNV z?zhUC`nLv%HpetYV!H!fduG+QI5;@?GE|<5pPiF-&&&lsFxHFzVLr0kMcIR?HRZWX zYWz4Wd^G0_&aU-daVb{4kmr5P8DRNl_t0ye*cres0%tx)ec^P|y?o{VXi zx&CmfsT}vFoS@Db}X< z*GsN3pS$!J z_x3|~QNUzYZgh8|uv|e=qg~SH_Mo6K0I24iKCh|MNlsf5npzVnq1kpk5E4aW!HlJQ za$r6b6zXpBFa;*tB<-&H?4KiZUOF%kl=9Bsj(xQG+fcmQjlOuMP4A1pL2`bzSI+x4 z)uJ?vr1YW_iJ6>t?=5XBYXZ8)FZWq92!Mv2kO%Z#O@1S^*Gw5i?Y%BW#>cEu)nL|1 ze<*f6O}4Iv)?k)893I~Q(@>uQZI&HjFjTW>Tq4rt=U$5U#g9hb4U_~}SyCZw{Np`{ z!ir1F9VPKUT2|*^)RCBTXns@E*-M9t0)=A|eJ~Sr>7x7#mcGTr_Pjd*Wfj6v!gNt; zg6-GXgKr6}ZzI6V-fJl$H|f!g!0!6sAj`L(>LUcf4GqIzbThA`_3)B*9qh>_-wEzD zp5J1wYSXQ6X~wHrm7Ffl##fh)(J>ypb}pctZv?a!H&Fm)Ma>;MRj?#wNnJ9=L9DrY zBQF35p7Cj{IC1pJ`6oPo=`8biz?kiDxeW5uA8b?4ZS6)3?J%ck!KByP=AIQldZ&^U ze_L#?Ze9OE83w~Wk7l~|O%077X`mWTCGA<&M$1btf}F8R_p4<@#W_ZOEYVFS90>kl zJsoiJ9cSF*O~N&TjO{I!1ogs$BN{O`P!9n2d8OMu!O}qK`fYjJ+nDzH!iJMgu=49a zwE&9^Ps)O_$to)9iYsE1YjnS!{MraY$$*eCJGK6v4o3YYx+>>(y-WN*(R$O!7y8>g zlDcT9xNqya_yx;(bZ{^B?&eO4x{AvB&#uFDvOr>Vq5%ommE%*<0h zXcoE1<0hGa7sXR4N!!Bz4b`8}9lgE!lb3h+VXXkeu zES&u4?Pk5}keWqme0$Hx+y1d-asYe8AntA*_W8T1p@qdtH&}l7kQ;i6O5Am3C4Am* zCa`3^klS=C==^B~Sf;UczN7wZY03kOciw;O-JByyf@oe|UOoYLflufM2S$PduyJXG z=Q+2R=>YH43wVPa( zH;^@8$(h8`Qu8;kGEpfSRILoz4~6e?bB|H6&_t%E>uxC8-~O0q-XiY!;lck0#$U=} z!X-UdW-dMJ-w4?N%P`j>nwpwI6wI}DC=|Lpa3NAdmTu+X5)Tzdx%D@?S^3%sdjo6# zQIyQK)Y-+s7x5%4jpU1I`fTo}oJ*O0^!Htk2!Chre<%Ru MCu;IYn8~aE16B{X2LJ#7 literal 0 HcmV?d00001 diff --git a/content/en/docs/Description/ha-transaction-processing.md b/content/en/docs/Description/ha-transaction-processing.md new file mode 100644 index 000000000..a30f92e42 --- /dev/null +++ b/content/en/docs/Description/ha-transaction-processing.md @@ -0,0 +1,15 @@ +# HA Transaction Processing + +openGauss manages transactions and guarantees the ACID properties. + +openGauss provides a primary/standby HA mechanism to reduce the service interruption time when the primary node is faulty. 
It allows key user applications to continuously provide external services, minimizing the impact of hardware, software, and human faults and thereby ensuring service continuity.
+
+**Fault rectification**
+
+Node faults can be rectified, and the ACID properties are preserved after rectification. openGauss ensures zero data loss after a node recovers from a fault or a restart.
+
+**Transaction management**
+
+-   Supports transaction blocks. The **start transaction** command can be used to start a transaction block explicitly.
+-   Supports single-statement transactions. If a transaction block is not started explicitly, each statement is processed as its own transaction.
+
diff --git a/content/en/docs/Description/high-concurrency-and-high-performance.md b/content/en/docs/Description/high-concurrency-and-high-performance.md
new file mode 100644
index 000000000..24576e50d
--- /dev/null
+++ b/content/en/docs/Description/high-concurrency-and-high-performance.md
@@ -0,0 +1,4 @@
+# High Concurrency and High Performance
+
+openGauss supports 10,000 concurrent connections through the server thread pool, provides thread affinity and millions of tpmC through NUMA-based kernel data structures, manages TB-level memory buffers through efficient elimination of hot and cold data, achieves multi-version access without read/write blocking through CSN-based snapshots, and avoids the performance fluctuation caused by full-page writes through incremental checkpoints.
+
diff --git a/content/en/docs/Description/memory-table.md b/content/en/docs/Description/memory-table.md
new file mode 100644
index 000000000..6e9aa6379
--- /dev/null
+++ b/content/en/docs/Description/memory-table.md
@@ -0,0 +1,4 @@
+# Memory Table
+
+With memory tables, all data access is lock-free and concurrent, optimizing data processing and meeting real-time requirements.
+
diff --git a/content/en/docs/Description/operating-environment.md b/content/en/docs/Description/operating-environment.md
new file mode 100644
index 000000000..df2d9a559
--- /dev/null
+++ b/content/en/docs/Description/operating-environment.md
@@ -0,0 +1,15 @@
+# Operating Environment
+
+## Hardware
+
+openGauss supports:
+
+-   Kunpeng servers and x86\_64-based general-purpose PC servers
+-   SATA, SAS, and SSD local storage
+-   1-gigabit and 10-gigabit Ethernet
+
+## Supported OSs
+
+-   \(Recommended\) openEuler 20.03 LTS on ARM
+-   CentOS 7.6 on x86\_64
+
diff --git a/content/en/docs/Description/primary-standby.md b/content/en/docs/Description/primary-standby.md
new file mode 100644
index 000000000..018d4def4
--- /dev/null
+++ b/content/en/docs/Description/primary-standby.md
@@ -0,0 +1,6 @@
+# Primary/Standby
+
+The primary/standby mode supports synchronous and asynchronous replication; the mode is chosen based on the service scenario. For synchronous replication, one primary and two standby nodes are deployed, which ensures reliability but affects performance. For asynchronous replication, one primary and one standby node are deployed, which has little impact on performance, but data may be lost if an exception occurs. openGauss supports automatic recovery of damaged pages: when a page on the primary node is damaged, it can be automatically restored from the standby node. openGauss also supports concurrent log recovery on the standby node to minimize the service unavailability time when the primary node is down.
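As a quick way to see which of the two replication modes a running deployment is using, the sketch below queries the primary node from gsql. It assumes the PostgreSQL-style settings **synchronous\_commit** and **synchronous\_standby\_names** and the **pg\_stat\_replication** view that openGauss inherits; treat the exact names and values as assumptions that may differ between versions.

```sql
-- A minimal sketch, not an authoritative procedure: run on the primary node.
SHOW synchronous_commit;            -- 'on'/'remote_*' indicates synchronous commit
SHOW synchronous_standby_names;     -- standbys a synchronous commit waits for ('' means none)
SELECT client_addr,                 -- address of each connected standby
       state,                       -- e.g. streaming or catchup
       sync_state                   -- 'sync' or 'async' for that standby
FROM pg_stat_replication;
```

If **sync\_state** reports **async** for every standby, commits do not wait for standby confirmation, which corresponds to the asynchronous deployment described above.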
+ +In addition, in primary/standby mode, if the read function of the standby node is enabled, the standby node supports read operations instead of write operations \(such as table creation, data insertion, and data deletion\), reducing the pressure on the primary node. + diff --git a/content/en/docs/Description/product-positioning.md b/content/en/docs/Description/product-positioning.md new file mode 100644 index 000000000..1631f12d0 --- /dev/null +++ b/content/en/docs/Description/product-positioning.md @@ -0,0 +1,8 @@ +# Product Positioning + +openGauss is a HA rational database that supports the SQL2003 standard and primary/standby deployment. + +- Multiple storage modes support composite service scenarios. +- The NUMA data structure supports high performance. +- Primary/standby deployment and CRC support HA. + diff --git a/content/en/docs/Description/public_sys-resources/icon-caution.gif b/content/en/docs/Description/public_sys-resources/icon-caution.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Description/public_sys-resources/icon-danger.gif b/content/en/docs/Description/public_sys-resources/icon-danger.gif new file mode 100644 index 0000000000000000000000000000000000000000..6e90d7cfc2193e39e10bb58c38d01a23f045d571 GIT binary patch literal 580 zcmV-K0=xZ3Nk%w1VIu$?0Hp~4{QBgqmQ+MG9K51r{QB&)np^||1PlfQ%(86!{`~yv zv{XhUWKt}AZaiE{EOcHp{O-j3`t;<+eEiycJT4p@77X;(jQsMfB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Description/public_sys-resources/icon-note.gif b/content/en/docs/Description/public_sys-resources/icon-note.gif new file mode 100644 index 0000000000000000000000000000000000000000..6314297e45c1de184204098efd4814d6dc8b1cda GIT binary patch literal 394 zcmZ?wbhEHblx7fPSjxcg=ii?@_wH=jwxy=7CMGH-B`L+l$wfv=#>UF#$gv|VY%C^b zCQFtrnKN(Bo_%|sJbO}7RAORe!otL&qo<>yq_Sq+8Xqqo5h0P3w3Lvb5E(g{p01vl zxR@)KuDH0l^z`+-dH3eaw=XqSH7aTIx{kzVBN;X&hha0dQSgWuiw0NWUvMRmkD|> literal 0 HcmV?d00001 diff --git a/content/en/docs/Description/public_sys-resources/icon-notice.gif b/content/en/docs/Description/public_sys-resources/icon-notice.gif new file mode 100644 index 0000000000000000000000000000000000000000..86024f61b691400bea99e5b1f506d9d9aef36e27 GIT binary patch literal 406 zcmV;H0crk6Nk%w1VIu$@0J8u9|NsB@_xJDb@8;&_*4Ea}&d#;9wWXz{jEszHYim+c zQaU<1At50E0000000000A^8Le000gEEC2ui03!e%000R7038S%NU)&51O^i-Tu6`s z0)`MFE@;3YqD6xSC^kTNu_J>91{PH8XfZ(p1pp2-SU@u3#{mEUC}_}tg3+I#{z}{Ok@D_ZUDg- zt0stin4;pC8M{WLSlRH*1pzqEw1}3oOskyNN?j;7HD{BBZ*OEcv4HK!6Bk6beR+04 z&8}k>SkTusVTDmkyOz#5fCA$JTPGJVQvr3uZ?QzzPQFvD0rGf_PdrcF`pMs}p^BcF zKtKTd`0wipR%nKN&Wj+V}pX;WC3SdJV!a_8Qi zE7z`U*|Y^H0^}fB$R?oG%6hQ z+MMLZbQBH@)Vg&1^3?qHb(5!%>3r0+`eq=&V&E}0Dypi0000000000 z00000A^8LW000R9EC2ui03!e$000L5z=Uu}ED8YtqjJd<+B}(9bIOb$3-31_h|V>=0A{ 
z1Hh0#H30>fNT})^fRU_83uewx9oRr{f{Sx1Ml`t)EQ zGkHZ67&~y{W5Jpq4H_WfuLxp*3<7O}GEl;1ESe36fLNs=B0&LQM1Buf(R)qg(BRd`t1OPjI1m_q4 literal 0 HcmV?d00001 diff --git a/content/en/docs/Description/sql-self-diagnosis.md b/content/en/docs/Description/sql-self-diagnosis.md new file mode 100644 index 000000000..94efcafcc --- /dev/null +++ b/content/en/docs/Description/sql-self-diagnosis.md @@ -0,0 +1,8 @@ +# SQL Self-Diagnosis + +To locate performance issues of a query, you can use **EXPLAIN PERFORMANCE** to query its execution plan. However, this method outputs many logs, requires the modification of service logic, and depends on expertise to locate problems. SQL self-diagnosis enables users to locate performance issues more efficiently. + +Before running a job, set the GUC parameters **resource\_track\_level** and **resource\_track\_cost**, and view the related system view and possible performance issues after job execution. The system view describes the possible causes of performance issues. To optimize low-performance jobs, see "Performance Tuning \> SQL Tuning \> Typical SQL Optimization Methods \> Optimizing SQL Self-Diagnosis" in the _Developer Guide_. + +SQL self-diagnosis helps users locate and optimize performance issues without affecting operations or modifying service logic. + diff --git a/content/en/docs/Description/system-architecture.md b/content/en/docs/Description/system-architecture.md new file mode 100644 index 000000000..a3fbde959 --- /dev/null +++ b/content/en/docs/Description/system-architecture.md @@ -0,0 +1,46 @@ +# System Architecture + +openGauss is a standalone database where data is stored on a single physical node and data access tasks are pushed to service nodes. In this way, high concurrency of servers enables quick data processing. In addition, data can be copied to the standby server through log replication, ensuring high reliability and scalability. + +## Software Architecture + +openGauss is a standalone database and can be deployed in primary/standby mode. + +[Figure 1](#en-us_topic_0237080634_en-us_topic_0231764167_fig5205420191411) shows the logical components of openGauss. + +**Figure 1** openGauss logical components +![](figures/opengauss-logical-components.png "opengauss-logical-components") + +**Table 1** Architecture description + + + + + + + + + + + + + + + + + + + +

+| Name | Description |
+| ---- | ----------- |
+| OM | Operation Manager (OM) provides management interfaces and tools for routine maintenance and configuration management of the cluster. |
+| Client driver | Receives access requests from the application layer and returns execution results. The client driver communicates with openGauss instances, sends application SQL commands, and receives execution results. |
+| openGauss (primary/standby) | The database node (DN) stores service data, executes data query tasks, and returns execution results. openGauss supports one primary and multiple standbys; primary and standby instances are deployed on different physical nodes. |
+| Storage | Serves as the server's local storage resource to store data permanently. |
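+
+As a quick illustration of the log-replication path described above, the primary node exposes the state of its connected standbys through a system view. The following is a minimal sketch, assuming a PostgreSQL-compatible **pg_stat_replication** view is available on the primary and that you are connected with **gsql**; column names may vary slightly between versions.
+
+```
+-- On the primary node: list connected standbys and their replication state (sketch).
+postgres=# SELECT client_addr, state, sync_state FROM pg_stat_replication;
+```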
+ diff --git a/content/en/docs/Description/technical-specifications.md b/content/en/docs/Description/technical-specifications.md new file mode 100644 index 000000000..2fd94d157 --- /dev/null +++ b/content/en/docs/Description/technical-specifications.md @@ -0,0 +1,81 @@ +# Technical Specifications + +[Table 1](#en-us_topic_0237080616_en-us_topic_0231764304_en-us_topic_0059777844_t24ab71cac351418d8d5b1b8bd1d942c7) lists the technical specifications of openGauss. + +**Table 1** Technical specifications + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Item | Maximum Value |
+| ---- | ------------- |
+| Database capacity | Varying with the OS and hardware |
+| Size of a table | 32 TB |
+| Size of data in each row | 1 GB |
+| Size of a single field in each record | 1 GB |
+| Number of records in each table | 2^48 |
+| Number of columns in each table | 250 to 1600 (varying with the field type) |
+| Number of indexes in each table | Unlimited |
+| Number of columns contained in a composite index | 32 |
+| Number of constraints in each table | Unlimited |
+| Number of concurrent connections | 10000 |
+| Number of partitions in a partitioned table | 32768 |
+| Size of each partition in a partitioned table | 32 TB |
+| Number of records in each partition of a partitioned table | 2^55 |
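+
+Most of these limits are fixed product capabilities, but the number of concurrent connections that a given deployment actually accepts is governed by the **max_connections** GUC parameter rather than by the 10000 product maximum listed above. The following is a minimal sketch for checking the configured value on a running instance, assuming a gsql connection:
+
+```
+-- Show the connection limit configured for this instance (sketch).
+postgres=# SHOW max_connections;
+```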
+ diff --git a/content/en/docs/Description/vectorized-executor-and-hybrid-row-column-storage-engine.md b/content/en/docs/Description/vectorized-executor-and-hybrid-row-column-storage-engine.md new file mode 100644 index 000000000..ced956b4f --- /dev/null +++ b/content/en/docs/Description/vectorized-executor-and-hybrid-row-column-storage-engine.md @@ -0,0 +1,52 @@ +# Vectorized Executor and Hybrid Row-Column Storage Engine + +## Background + +In a wide table containing a huge amount of data, a query usually only involves certain columns. In this case, the query performance of the row-store engine is poor. For example, a single table containing the data of a meteorological agency has 200 to 800 columns. Among these columns, only 10 are frequently accessed. In this case, a vectorized executor and column-store engine can significantly improve performance by saving storage space. + +## Vectorized Executor + +[Figure 1](#en-us_topic_0237080624_en-us_topic_0231764690_en-us_topic_0059777898_f9d90aebe179a40759039d0263492489d) shows a standard vectorized executor. Control flow travels in the downlink direction \(shown as solid lines in the following figure\) and data flow in the uplink direction \(shown as dotted lines in the following figure\). The upper-layer node invokes the lower-layer node to request data and the lower-layer node only returns one tuple to the upper-layer node at a time. + +By contrast, the vectorized executor returns a batch of tuples at a time, which significantly improves performance using column storage. + +**Figure 1** Vectorized executor + + +![](figures/向量化执行引擎(png).png) + +## Hybrid Row-Column Storage Engine + +openGauss supports both row and column storage models. Choose a row- or column-store table as needed. + +Column storage is recommended if a table contains many columns \(called a wide table\) but its query involves only a few columns. Row storage is recommended if a table contains only a few columns and a query involves most of the fields. + +The hybrid row-column storage engine achieves higher data compression ratio \(column storage\), index performance \(column storage\), and point update and point query \(row storage\) performance, as shown in [Figure 2](#en-us_topic_0237080624_en-us_topic_0231764690_en-us_topic_0059777898_fbb2af39ce12a419cb437829aaf1cf4fb). + +**Figure 2** Hybrid row-column storage engine + + +![](figures/opengauss行列混存引擎.png) + +The restrictions of the column storage engine are as follows: + +- For DDL statements, only CREATE TABLE, DROP TABLE, and TRUNCATE TABLE are supported. + + Partition management using DDL statements \(such as ADD PARTITION, DROP PARTITION, MERGE PARTITION, and EXCHANGE\) is supported. + + The **CREATE TABLE LIKE** statement is supported. + + The **ALTER TABLE** statement is partially supported. + + Other DDL statements are not supported. + +- For DML statements, UPDATE, COPY, BULKLOAD, and DELETE are supported. +- Triggers and primary foreign keys are not supported. +- Psort index, B-tree index, and GIN index are supported. For details, see "SQL Reference \> SQL Syntax \> CREATE INDEX" in the _Developer Guide_. + +## Data Compression in Column Storage + +Old and inactive data can be compressed to free up space, reducing procurement and O&M costs. + +In openGauss, data can be compressed using delta encoding, dictionary coder, RLE, LZ4, and ZLIB algorithms. The system automatically selects a compression algorithm based on data characteristics. The average compression ratio is 7:1. 
Compressed data can be directly accessed and is transparent to services. This greatly reduces the preparation time before accessing historical data. + diff --git a/content/en/docs/Developerguide/Developerguide.md b/content/en/docs/Developerguide/Developerguide.md new file mode 100644 index 000000000..9b70b86b7 --- /dev/null +++ b/content/en/docs/Developerguide/Developerguide.md @@ -0,0 +1,79 @@ +# About This Document + +## Overview + +This section describes how to design, create, query, and maintain a database, including the SQL statements, stored procedures, system catalogs, and views. + +## Intended Audience + +This document is intended for developers on C/Java application based on openGauss, providing necessary references. + +As an application developer, you need to be familiar with: + +- Knowledge about OSs. This is the foundation of everything. +- C/Java programming language through which you can develop applications. +- An IDE of the C/Java language, which is the prerequisite for efficient application development. +- SQL syntax, using which you can operate databases. + +## Symbol Conventions + +The symbols that may be found in this document are defined as follows: + + + + + + + + + + + + + + + + + + + + + + +

+| Symbol | Description |
+| ------ | ----------- |
+| ![](public_sys-resources/icon-danger.gif) | Indicates a hazard with a high level of risk which, if not avoided, will result in death or serious injury. |
+| ![](public_sys-resources/icon-warning.gif) | Indicates a hazard with a medium level of risk which, if not avoided, could result in death or serious injury. |
+| ![](public_sys-resources/icon-caution.gif) | Indicates a hazard with a low level of risk which, if not avoided, could result in minor or moderate injury. |
+| ![](public_sys-resources/icon-notice.gif) | Indicates a potentially hazardous situation which, if not avoided, could result in equipment damage, data loss, performance deterioration, or unanticipated results. NOTICE is used to address practices not related to personal injury. |
+| ![](public_sys-resources/icon-note.gif) | Supplements the important information in the main text. NOTE is used to address information not related to personal injury, equipment damage, and environment deterioration. |
+ +## Change History + + + + + + + + + + + + +

+| Issue | Date | Description |
+| ----- | ---- | ----------- |
+| 01 | 2020-05-12 | This issue is the first official release. |
+ diff --git a/content/en/docs/Developerguide/abort.md b/content/en/docs/Developerguide/abort.md new file mode 100644 index 000000000..00a30ff5d --- /dev/null +++ b/content/en/docs/Developerguide/abort.md @@ -0,0 +1,70 @@ +# ABORT + +## Function + +**ABORT** rolls back the current transaction and cancels the changes in the transaction. + +This command is equivalent to [ROLLBACK](rollback.md), and is present only for historical reasons. Now **ROLLBACK** is recommended. + +## Precautions + +**ABORT** has no impact outside a transaction, but will provoke a warning. + +## Syntax + +``` +ABORT [ WORK | TRANSACTION ] ; +``` + +## Parameter Description + +**WORK | TRANSACTION** + +Specifies an optional keyword, which has no effect except increasing readability. + +## Examples + +``` +-- Create the customer_demographics_t1 table. +postgres=# CREATE TABLE customer_demographics_t1 +( + CD_DEMO_SK INTEGER NOT NULL, + CD_GENDER CHAR(1) , + CD_MARITAL_STATUS CHAR(1) , + CD_EDUCATION_STATUS CHAR(20) , + CD_PURCHASE_ESTIMATE INTEGER , + CD_CREDIT_RATING CHAR(10) , + CD_DEP_COUNT INTEGER , + CD_DEP_EMPLOYED_COUNT INTEGER , + CD_DEP_COLLEGE_COUNT INTEGER +) +WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE) +; + +-- Insert data. +postgres=# INSERT INTO customer_demographics_t1 VALUES(1920801,'M', 'U', 'DOCTOR DEGREE', 200, 'GOOD', 1, 0,0); + +-- Start a transaction. +postgres=# START TRANSACTION; + +-- Update the column. +postgres=# UPDATE customer_demographics_t1 SET cd_education_status= 'Unknown'; + +-- Abort the transaction. All updates are rolled back. +postgres=# ABORT; + +-- Query data. +postgres=# SELECT * FROM customer_demographics_t1 WHERE cd_demo_sk = 1920801; +cd_demo_sk | cd_gender | cd_marital_status | cd_education_status | cd_purchase_estimate | cd_credit_rating | cd_dep_count | cd_dep_employed_count | cd_dep_college_count +------------+-----------+-------------------+----------------------+----------------------+------------------+--------------+-----------------------+---------------------- + 1920801 | M | U | DOCTOR DEGREE | 200 | GOOD | 1 | 0 | 0 +(1 row) + +-- Delete the table. 
+postgres=# DROP TABLE customer_demographics_t1; +``` + +## Helpful Links + +[SET TRANSACTION](set-transaction.md), [COMMIT | END](commit-end.md), and [ROLLBACK](rollback.md) + diff --git a/content/en/docs/Developerguide/additional-features.md b/content/en/docs/Developerguide/additional-features.md new file mode 100644 index 000000000..df2a2120c --- /dev/null +++ b/content/en/docs/Developerguide/additional-features.md @@ -0,0 +1,11 @@ +# Additional Features + +- **[Manipulating tsvector](manipulating-tsvector.md)** + +- **[Manipulating Queries](manipulating-queries.md)** + +- **[Rewriting Queries](rewriting-queries.md)** + +- **[Gathering Document Statistics](gathering-document-statistics.md)** + + diff --git a/content/en/docs/Developerguide/administration.md b/content/en/docs/Developerguide/administration.md new file mode 100644 index 000000000..9cde72408 --- /dev/null +++ b/content/en/docs/Developerguide/administration.md @@ -0,0 +1,41 @@ +# Administration + +\[Orange – Waiting for Vladi to review\] + +The following describes various MOT administration topics – + +- [Durability](durability.md#EN-US_TOPIC_0257867401) +- [Checkpoints](checkpoints.md#EN-US_TOPIC_0257867402) +- [Recovery](recovery-18.md#EN-US_TOPIC_0257867403) +- [Replication and High Availability](replication-and-high-availability.md#EN-US_TOPIC_0257867404) +- [Memory Management](memory-management.md#EN-US_TOPIC_0257867405) +- [Vacuum](vacuum.md#EN-US_TOPIC_0257867406) +- [MOT Statistics](mot-statistics.md#EN-US_TOPIC_0257867407) +- [Monitoring](monitoring.md#EN-US_TOPIC_0257867408) +- [MOT Error Messages](mot-error-messages.md#EN-US_TOPIC_0257867409) +- [Scale-out](scale-out.md#EN-US_TOPIC_0257867412) +- [Logging](logging.md#EN-US_TOPIC_0257867413) + +- **[Durability](durability.md)** + +- **[Checkpoints](checkpoints.md)** + +- **[Recovery](recovery-18.md)** + +- **[Replication and High Availability](replication-and-high-availability.md)** + +- **[Memory Management](memory-management.md)** + +- **[Vacuum](vacuum.md)** + +- **[MOT Statistics](mot-statistics.md)** + +- **[Monitoring](monitoring.md)** + +- **[MOT Error Messages](mot-error-messages.md)** + +- **[Scale-out](scale-out.md)** + +- **[Logging](logging.md)** + + diff --git a/content/en/docs/Developerguide/administrators.md b/content/en/docs/Developerguide/administrators.md new file mode 100644 index 000000000..563e08ccf --- /dev/null +++ b/content/en/docs/Developerguide/administrators.md @@ -0,0 +1,44 @@ +# Administrators + +## Initial Users + +The account automatically generated during openGauss installation is called an initial user. An initial user is the system, monitoring, O&M, and security policy administrator who has the highest-level permissions in the system and can perform all operations. The username is the same as the name of the OS user who installs openGauss. The initial password is **GaussDB@2012**. After the first login, change the initial password in time. + +An initial user bypasses all permission checks. You are advised to use an initial user as a database administrator only for database management other than service running. + +## System Administrators + +A system administrator is an account with the **SYSADMIN** attribute. By default, a database system administrator has the same permissions as object owners but does not have the object permissions in **dbe\_perf** mode. 
+ +To create a system administrator, connect to the database as the initial user or a system administrator and run the **[CREATE USER](create-user.md)** or **[ALTER USER](alter-user.md)** statement with **SYSADMIN** specified. + +``` +postgres=# CREATE USER sysadmin WITH SYSADMIN password "Bigdata@123"; +``` + +or + +``` +postgres=# ALTER USER joe SYSADMIN; +``` + +To run the **ALTER USER** statement, the user must exist. + +## Monitoring Administrators + +A monitoring administrator is an account with the **MONADMIN** attribute and has the permissions to query views and functions in **dbe\_perf** mode. A monitoring administrator can also grant or revoke object permissions in **dbe\_perf** mode. + +To create a monitoring administrator, connect to the database as the initial user and run the **[CREATE USER](create-user.md)** or **[ALTER USER](alter-user.md)** statement with **MONADMIN** specified. + +``` +postgres=# CREATE USER monadmin WITH MONADMIN password "Bigdata@123"; +``` + +or + +``` +postgres=# ALTER USER joe MONADMIN; +``` + +To run the **ALTER USER** statement, the user must exist. + diff --git a/content/en/docs/Developerguide/advisory-lock-functions.md b/content/en/docs/Developerguide/advisory-lock-functions.md new file mode 100644 index 000000000..fec47572f --- /dev/null +++ b/content/en/docs/Developerguide/advisory-lock-functions.md @@ -0,0 +1,171 @@ +# Advisory Lock Functions + +Advisory lock functions manage advisory locks. + +- pg\_advisory\_lock\(key bigint\) + + Description: Obtains an exclusive session-level advisory lock. + + Return type: void + + Note: **pg\_advisory\_lock** locks resources defined by an application. The resources can be identified using a 64-bit or two nonoverlapped 32-bit key values. If another session locks the resources, the function blocks the resources until they can be used. The lock is exclusive. Multiple locking requests are pushed into the stack. Therefore, if the same resource is locked three times, it must be unlocked three times so that it is released to another session. + +- pg\_advisory\_lock\(key1 int, key2 int\) + + Description: Obtains an exclusive session-level advisory lock. + + Return type: void + + Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair \(65535, 65535\). + +- pg\_advisory\_lock\_shared\(key bigint\) + + Description: Obtains a shared session-level advisory lock. + + Return type: void + +- pg\_advisory\_lock\_shared\(key1 int, key2 int\) + + Description: Obtains a shared session-level advisory lock. + + Return type: void + + Note: **pg\_advisory\_lock\_shared** works in the same way as **pg\_advisory\_lock**, except the lock can be shared with other sessions requesting shared locks. Only would-be exclusive lockers are locked out. + +- pg\_advisory\_unlock\(key bigint\) + + Description: Releases an exclusive session-level advisory lock. + + Return type: Boolean + +- pg\_advisory\_unlock\(key1 int, key2 int\) + + Description: Releases an exclusive session-level advisory lock. + + Return type: Boolean + + Note: **pg\_advisory\_unlock** releases the obtained exclusive advisory lock. If the release is successful, the function returns **true**. If the lock was not held, it will return **false**. In addition, a SQL warning will be reported by the server. + +- pg\_advisory\_unlock\_shared\(key bigint\) + + Description: Releases a shared session-level advisory lock. 
+ + Return type: Boolean + +- pg\_advisory\_unlock\_shared\(key1 int, key2 int\) + + Description: Releases a shared session-level advisory lock. + + Return type: Boolean + + Note: **pg\_advisory\_unlock\_shared** works in the same way as **pg\_advisory\_unlock**, except it releases a shared session-level advisory lock. + +- pg\_advisory\_unlock\_all\(\) + + Description: Releases all advisory locks owned by the current session. + + Return type: void + + Note: **pg\_advisory\_unlock\_all** releases all advisory locks owned by the current session. The function is implicitly invoked when the session ends even if the client is abnormally disconnected. + +- pg\_advisory\_xact\_lock\(key bigint\) + + Description: Obtains an exclusive transaction-level advisory lock. + + Return type: void + +- pg\_advisory\_xact\_lock\(key1 int, key2 int\) + + Description: Obtains an exclusive transaction-level advisory lock. + + Return type: void + + Note: **pg\_advisory\_xact\_lock** works in the same way as **pg\_advisory\_lock**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair \(65535, 65535\). + +- pg\_advisory\_xact\_lock\_shared\(key bigint\) + + Description: Obtains a shared transaction-level advisory lock. + + Return type: void + +- pg\_advisory\_xact\_lock\_shared\(key1 int, key2 int\) + + Description: Obtains a shared transaction-level advisory lock. + + Return type: void + + Note: **pg\_advisory\_xact\_lock\_shared** works in the same way as **pg\_advisory\_lock\_shared**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. + +- pg\_try\_advisory\_lock\(key bigint\) + + Description: Obtains an exclusive session-level advisory lock if available. + + Return type: Boolean + + Note: **pg\_try\_advisory\_lock** is similar to **pg\_advisory\_lock**, except **pg\_try\_advisory\_lock** does not block the resource until the resource is released. **pg\_try\_advisory\_lock** either immediately obtains the lock and returns **true** or returns **false**, which indicates the lock cannot be performed currently. + +- pg\_try\_advisory\_lock\(key1 int, key2 int\) + + Description: Obtains an exclusive session-level advisory lock if available. + + Return type: Boolean + + Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair \(65535, 65535\). + +- pg\_try\_advisory\_lock\_shared\(key bigint\) + + Description: Obtains a shared session-level advisory lock if available. + + Return type: Boolean + +- pg\_try\_advisory\_lock\_shared\(key1 int, key2 int\) + + Description: Obtains a shared session-level advisory lock if available. + + Return type: Boolean + + Note: **pg\_try\_advisory\_lock\_shared** is similar to **pg\_try\_advisory\_lock**, except **pg\_try\_advisory\_lock\_shared** attempts to obtain a shared lock instead of an exclusive lock. + +- pg\_try\_advisory\_xact\_lock\(key bigint\) + + Description: Obtains an exclusive transaction-level advisory lock if available. + + Return type: Boolean + +- pg\_try\_advisory\_xact\_lock\(key1 int, key2 int\) + + Description: Obtains an exclusive transaction-level advisory lock if available. 
+ + Return type: Boolean + + Note: **pg\_try\_advisory\_xact\_lock** works in the same way as **pg\_try\_advisory\_lock**, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. Note: Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair \(65535, 65535\). + +- pg\_try\_advisory\_xact\_lock\_shared\(key bigint\) + + Description: Obtains a shared transaction-level advisory lock if available. + + Return type: Boolean + +- pg\_try\_advisory\_xact\_lock\_shared\(key1 int, key2 int\) + + Description: Obtains a shared transaction-level advisory lock if available. + + Return type: Boolean + + Note: **pg\_try\_advisory\_xact\_lock\_shared** works in the same way as **pg\_try\_advisory\_lock\_shared**, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. + +- lock\_cluster\_ddl\(\) + + Description: Attempts to obtain a session-level exclusive advisory lock for all active primary database nodes in openGauss. + + Return type: Boolean + + Note: Only users with the **sysadmin** permission can call this function. + +- unlock\_cluster\_ddl\(\) + + Description: Attempts to add a session-level exclusive advisory lock on the primary database node. + + Return type: Boolean + + diff --git a/content/en/docs/Developerguide/aggregate-functions.md b/content/en/docs/Developerguide/aggregate-functions.md new file mode 100644 index 000000000..fa4c7d958 --- /dev/null +++ b/content/en/docs/Developerguide/aggregate-functions.md @@ -0,0 +1,673 @@ +# Aggregate Functions + +## Aggregate Functions + +- sum\(expression\) + + Description: Sum of expression across all input values + + Return type: + + Generally, same as the argument data type. In the following cases, type conversion occurs: + + - **BIGINT** for **SMALLINT** or **INT** arguments + - **NUMBER** for **BIGINT** arguments + - **DOUBLE PRECISION** for floating-point arguments + + Example: + + ``` + postgres=# SELECT SUM(ss_ext_tax) FROM tpcds.STORE_SALES; + sum + -------------- + 213267594.69 + (1 row) + ``` + +- max\(expression\) + + Description: maximum value of expression across all input values + + Argument types: any array, numeric, string, or date/time type + + Return type: same as the argument type + + Example: + + ``` + postgres=# SELECT MAX(inv_quantity_on_hand) FROM tpcds.inventory; + ``` + +- min\(expression\) + + Description: minimum value of expression across all input values + + Argument types: any array, numeric, string, or date/time type + + Return type: same as the argument type + + Example: + + ``` + postgres=# SELECT MIN(inv_quantity_on_hand) FROM tpcds.inventory; + min + ----- + 0 + (1 row) + ``` + +- avg\(expression\) + + Description: Average \(arithmetic mean\) of all input values + + Return type: + + **NUMBER** for any integer-type argument. + + **DOUBLE PRECISION** for a floating-point argument, + + otherwise the same as the argument data type. 
+ + Example: + + ``` + postgres=# SELECT AVG(inv_quantity_on_hand) FROM tpcds.inventory; + avg + ---------------------- + 500.0387129084044604 + (1 row) + ``` + +- count\(expression\) + + Description: number of input rows for which the value of expression is not null + + Return type: bigint + + Example: + + ``` + postgres=# SELECT COUNT(inv_quantity_on_hand) FROM tpcds.inventory; + count + ---------- + 11158087 + (1 row) + ``` + +- count\(\*\) + + Description: number of input rows + + Return type: bigint + + Example: + + ``` + postgres=# SELECT COUNT(*) FROM tpcds.inventory; + count + ---------- + 11745000 + (1 row) + ``` + +- array\_agg\(expression\) + + Description: input values, including nulls, concatenated into an array + + Return type: array of the argument type + + Example: + + ``` + postgres=# SELECT ARRAY_AGG(sr_fee) FROM tpcds.store_returns WHERE sr_customer_sk = 2; + array_agg + --------------- + {22.18,63.21} + (1 row) + ``` + +- string\_agg\(expression, delimiter\) + + Description: input values concatenated into a string, separated by delimiter + + Return type: same as the argument type + + Example: + + ``` + postgres=# SELECT string_agg(sr_item_sk, ',') FROM tpcds.store_returns where sr_item_sk < 3; + string_agg + --------------------------------------------------------------------------------- + ------------------------------ + 1,2,1,2,2,1,1,2,2,1,2,1,2,1,1,1,2,1,1,1,1,1,2,1,1,1,1,1,2,2,1,1,1,1,1,1,1,1,1,2, + 2,1,1,1,1,1,1,2,2,1,1,2,1,1,1 + (1 row) + ``` + +- listagg\(expression \[, delimiter\]\) WITHIN GROUP\(ORDER BY order-list\) + + Description: aggregation column data sorted according to the mode specified by **WITHIN GROUP**, and concatenated to a string using the specified delimiter + + - **expression**: Mandatory. It specifies an aggregation column name or a column-based, valid expression. It does not support the **DISTINCT** keyword and the **VARIADIC** parameter. + - **delimiter**: Optional. It specifies a delimiter, which can be a string constant or a deterministic expression based on a group of columns. The default value is empty. + - **order-list**: Mandatory. It specifies the sorting mode in a group. + + Return type: text + + Example: + + The aggregation column is of the text character set type. + + ``` + postgres=# SELECT deptno, listagg(ename, ',') WITHIN GROUP(ORDER BY ename) AS employees FROM emp GROUP BY deptno; + deptno | employees + --------+-------------------------------------- + 10 | CLARK,KING,MILLER + 20 | ADAMS,FORD,JONES,SCOTT,SMITH + 30 | ALLEN,BLAKE,JAMES,MARTIN,TURNER,WARD + (3 rows) + ``` + + The aggregation column is of the integer type. + + ``` + postgres=# SELECT deptno, listagg(mgrno, ',') WITHIN GROUP(ORDER BY mgrno NULLS FIRST) AS mgrnos FROM emp GROUP BY deptno; + deptno | mgrnos + --------+------------------------------- + 10 | 7782,7839 + 20 | 7566,7566,7788,7839,7902 + 30 | 7698,7698,7698,7698,7698,7839 + (3 rows) + ``` + + The aggregation column is of the floating point type. + + ``` + postgres=# SELECT job, listagg(bonus, '($); ') WITHIN GROUP(ORDER BY bonus DESC) || '($)' AS bonus FROM emp GROUP BY job; + job | bonus + ------------+------------------------------------------------- + CLERK | 10234.21($); 2000.80($); 1100.00($); 1000.22($) + PRESIDENT | 23011.88($) + ANALYST | 2002.12($); 1001.01($) + MANAGER | 10000.01($); 2399.50($); 999.10($) + SALESMAN | 1000.01($); 899.00($); 99.99($); 9.00($) + (5 rows) + ``` + + The aggregation column is of the time type. 
+ + ``` + postgres=# SELECT deptno, listagg(hiredate, ', ') WITHIN GROUP(ORDER BY hiredate DESC) AS hiredates FROM emp GROUP BY deptno; + deptno | hiredates + --------+------------------------------------------------------------------------------------------------------------------------------ + 10 | 1982-01-23 00:00:00, 1981-11-17 00:00:00, 1981-06-09 00:00:00 + 20 | 2001-04-02 00:00:00, 1999-12-17 00:00:00, 1987-05-23 00:00:00, 1987-04-19 00:00:00, 1981-12-03 00:00:00 + 30 | 2015-02-20 00:00:00, 2010-02-22 00:00:00, 1997-09-28 00:00:00, 1981-12-03 00:00:00, 1981-09-08 00:00:00, 1981-05-01 00:00:00 + (3 rows) + ``` + + The aggregation column is of the time interval type. + + ``` + postgres=# SELECT deptno, listagg(vacationTime, '; ') WITHIN GROUP(ORDER BY vacationTime DESC) AS vacationTime FROM emp GROUP BY deptno; + deptno | vacationtime + --------+------------------------------------------------------------------------------------ + 10 | 1 year 30 days; 40 days; 10 days + 20 | 70 days; 36 days; 9 days; 5 days + 30 | 1 year 1 mon; 2 mons 10 days; 30 days; 12 days 12:00:00; 4 days 06:00:00; 24:00:00 + (3 rows) + ``` + + By default, the delimiter is empty. + + ``` + postgres=# SELECT deptno, listagg(job) WITHIN GROUP(ORDER BY job) AS jobs FROM emp GROUP BY deptno; + deptno | jobs + --------+---------------------------------------------- + 10 | CLERKMANAGERPRESIDENT + 20 | ANALYSTANALYSTCLERKCLERKMANAGER + 30 | CLERKMANAGERSALESMANSALESMANSALESMANSALESMAN + (3 rows) + ``` + + When **listagg** is used as a window function, the **OVER** clause does not support the window sorting of **ORDER BY**, and the **listagg** column is an ordered aggregation of the corresponding groups. + + ``` + postgres=# SELECT deptno, mgrno, bonus, listagg(ename,'; ') WITHIN GROUP(ORDER BY hiredate) OVER(PARTITION BY deptno) AS employees FROM emp; + deptno | mgrno | bonus | employees + --------+-------+----------+------------------------------------------- + 10 | 7839 | 10000.01 | CLARK; KING; MILLER + 10 | | 23011.88 | CLARK; KING; MILLER + 10 | 7782 | 10234.21 | CLARK; KING; MILLER + 20 | 7566 | 2002.12 | FORD; SCOTT; ADAMS; SMITH; JONES + 20 | 7566 | 1001.01 | FORD; SCOTT; ADAMS; SMITH; JONES + 20 | 7788 | 1100.00 | FORD; SCOTT; ADAMS; SMITH; JONES + 20 | 7902 | 2000.80 | FORD; SCOTT; ADAMS; SMITH; JONES + 20 | 7839 | 999.10 | FORD; SCOTT; ADAMS; SMITH; JONES + 30 | 7839 | 2399.50 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + 30 | 7698 | 9.00 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + 30 | 7698 | 1000.22 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + 30 | 7698 | 99.99 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + 30 | 7698 | 1000.01 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + 30 | 7698 | 899.00 | BLAKE; TURNER; JAMES; MARTIN; WARD; ALLEN + (14 rows) + ``` + +- covar\_pop\(Y, X\) + + Description: overall covariance + + Return type: double precision + + Example: + + ``` + postgres=# SELECT COVAR_POP(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + covar_pop + ------------------ + 829.749627587403 + (1 row) + ``` + +- covar\_samp\(Y, X\) + + Description: sample covariance + + Return type: double precision + + Example: + + ``` + postgres=# SELECT COVAR_SAMP(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + covar_samp + ------------------ + 830.052235037289 + (1 row) + ``` + +- stddev\_pop\(expression\) + + Description: overall standard difference + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + 
postgres=# SELECT STDDEV_POP(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + stddev_pop + ------------------ + 289.224294957556 + (1 row) + ``` + +- stddev\_samp\(expression\) + + Description: sample standard deviation of the input values + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + postgres=# SELECT STDDEV_SAMP(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + stddev_samp + ------------------ + 289.224359757315 + (1 row) + ``` + +- var\_pop\(expression\) + + Description: population variance of the input values \(square of the population standard deviation\) + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + postgres=# SELECT VAR_POP(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + var_pop + -------------------- + 83650.692793695475 + (1 row) + ``` + +- var\_samp\(expression\) + + Description: sample variance of the input values \(square of the sample standard deviation\) + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + postgres=# SELECT VAR_SAMP(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + var_samp + -------------------- + 83650.730277028768 + (1 row) + ``` + +- bit\_and\(expression\) + + Description: the bitwise AND of all non-null input values, or null if none + + Return type: same as the argument type + + Example: + + ``` + postgres=# SELECT BIT_AND(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + bit_and + --------- + 0 + (1 row) + ``` + +- bit\_or\(expression\) + + Description: the bitwise OR of all non-null input values, or null if none + + Return type: same as the argument type + + Example: + + ``` + postgres=# SELECT BIT_OR(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + bit_or + -------- + 1023 + (1 row) + ``` + +- bool\_and\(expression\) + + Description: Its value is **true** if all input values are **true**, otherwise **false**. + + Return type: bool + + Example: + + ``` + postgres=# SELECT bool_and(100 <2500); + bool_and + ---------- + t + (1 row) + ``` + +- bool\_or\(expression\) + + Description: Its value is **true** if at least one input value is **true**, otherwise **false**. 
+ + Return type: bool + + Example: + + ``` + postgres=# SELECT bool_or(100 <2500); + bool_or + ---------- + t + (1 row) + ``` + +- corr\(Y, X\) + + Description: correlation coefficient + + Return type: double precision + + Example: + + ``` + postgres=# SELECT CORR(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + corr + ------------------- + .0381383624904186 + (1 row) + ``` + +- every\(expression\) + + Description: equivalent to **bool\_and** + + Return type: bool + + Example: + + ``` + postgres=# SELECT every(100 <2500); + every + ------- + t + (1 row) + ``` + +- regr\_avgx\(Y, X\) + + Description: average of the independent variable \(**sum\(X\)/N**\) + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_AVGX(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_avgx + ------------------ + 578.606576740795 + (1 row) + ``` + +- regr\_avgy\(Y, X\) + + Description: average of the dependent variable \(**sum\(Y\)/N**\) + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_AVGY(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_avgy + ------------------ + 50.0136711629602 + (1 row) + ``` + +- regr\_count\(Y, X\) + + Description: number of input rows in which both expressions are non-null + + Return type: bigint + + Example: + + ``` + postgres=# SELECT REGR_COUNT(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_count + ------------ + 2743 + (1 row) + ``` + +- regr\_intercept\(Y, X\) + + Description: y-intercept of the least-squares-fit linear equation determined by the \(X, Y\) pairs + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_INTERCEPT(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_intercept + ------------------ + 49.2040847848607 + (1 row) + ``` + +- regr\_r2\(Y, X\) + + Description: square of the correlation coefficient + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_R2(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_r2 + -------------------- + .00145453469345058 + (1 row) + ``` + +- regr\_slope\(Y, X\) + + Description: slope of the least-squares-fit linear equation determined by the \(X, Y\) pairs + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_SLOPE(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_slope + -------------------- + .00139920009665259 + (1 row) + ``` + +- regr\_sxx\(Y, X\) + + Description: **sum\(X^2\) - sum\(X\)^2/N **\(sum of squares of the independent variables\) + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_SXX(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_sxx + ------------------ + 1626645991.46135 + (1 row) + ``` + +- regr\_sxy\(Y, X\) + + Description: **sum\(X\*Y\) - sum\(X\) \* sum\(Y\)/N** \("sum of products" of independent times dependent variable\) + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_SXY(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE sr_customer_sk < 1000; + regr_sxy + ------------------ + 2276003.22847225 + (1 row) + ``` + +- regr\_syy\(Y, X\) + + Description: **sum\(Y^2\) - sum\(Y\)^2/N** \("sum of squares" of the dependent variable\) + + Return type: double precision + + Example: + + ``` + postgres=# SELECT REGR_SYY(sr_fee, sr_net_loss) FROM tpcds.store_returns WHERE 
sr_customer_sk < 1000; + regr_syy + ----------------- + 2189417.6547314 + (1 row) + ``` + +- stddev\(expression\) + + Description: alias of **stddev\_samp** + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + postgres=# SELECT STDDEV(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + stddev + ------------------ + 289.224359757315 + (1 row) + ``` + +- variance\(expexpression,ression\) + + Description: alias of **var\_samp** + + Return type: **double precision** for floating-point arguments, otherwise **numeric** + + Example: + + ``` + postgres=# SELECT VARIANCE(inv_quantity_on_hand) FROM tpcds.inventory WHERE inv_warehouse_sk = 1; + variance + -------------------- + 83650.730277028768 + (1 row) + ``` + +- checksum\(expression\) + + Description: Returns the CHECKSUM value of all input values. This function can be used to check whether the data in the tables is the same before and after the backup, restoration, or migration of the openGauss database \(databases other than openGauss are not supported\). Before and after database backup, database restoration, or data migration, you need to manually run SQL commands to obtain the execution results. Compare the obtained execution results to check whether the data in the tables before and after the backup or migration is the same. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- For large tables, the execution of CHECKSUM function may take a long time. + >- If the CHECKSUM values of two tables are different, it indicates that the contents of the two tables are different. Using the hash function in the CHECKSUM function may incur conflicts. There is low possibility that two tables with different contents may have the same CHECKSUM value. The same problem may occur when CHECKSUM is used for columns. + >- If the time type is timestamp, timestamptz, or smalldatetime, ensure that the time zone settings are the same when calculating the CHECKSUM value. + + - If the CHECKSUM value of a column is calculated and the column type can be changed to TEXT by default, set _expression_ to the column name. + - If the CHECKSUM value of a column is calculated and the column type cannot be converted to TEXT by default, set _expression_ to _Column name_**::TEXT**. + - If the CHECKSUM value of all columns is calculated, set _expression_ to _Table name_**::TEXT**. + + The following types of data can be converted into TEXT types by default: char, name, int8, int2, int1, int4, raw, pg\_node\_tree, float4, float8, bpchar, varchar, nvarchar2, date, timestamp, timestamptz, numeric, and smalldatetime. Other types need to be forcibly converted to TEXT. + + Return type: numeric + + Example: + + The following shows the CHECKSUM value of a column that can be converted to the TEXT type by default: + + ``` + postgres=# SELECT CHECKSUM(inv_quantity_on_hand) FROM tpcds.inventory; + checksum + ------------------- + 24417258945265247 + (1 row) + ``` + + The following shows the CHECKSUM value of a column that cannot be converted to the TEXT type by default. Note that the CHECKSUM parameter is set to _Column name_**::TEXT**. + + ``` + postgres=# SELECT CHECKSUM(inv_quantity_on_hand::TEXT) FROM tpcds.inventory; + checksum + ------------------- + 24417258945265247 + (1 row) + ``` + + The following shows the CHECKSUM value of all columns in a table. Note that the CHECKSUM parameter is set to _Table name_**::TEXT**. The table name is not modified by its schema. 
+ + ``` + postgres=# SELECT CHECKSUM(inventory::TEXT) FROM tpcds.inventory; + checksum + ------------------- + 25223696246875800 + (1 row) + ``` + + diff --git a/content/en/docs/Developerguide/ai-features.md b/content/en/docs/Developerguide/ai-features.md new file mode 100644 index 000000000..7d5c36dac --- /dev/null +++ b/content/en/docs/Developerguide/ai-features.md @@ -0,0 +1,9 @@ +# AI Features + +- **[Predictor](predictor.md)** + +- **[X-Tuner](x-tuner.md)** + +- **[SQLdiag](sqldiag.md)** + + diff --git a/content/en/docs/Developerguide/alarm-detection.md b/content/en/docs/Developerguide/alarm-detection.md new file mode 100644 index 000000000..c948b7d34 --- /dev/null +++ b/content/en/docs/Developerguide/alarm-detection.md @@ -0,0 +1,47 @@ +# Alarm Detection + +During the running of the openGauss, error scenarios can be detected and informed to users in time. + +## enable\_alarm + +**Parameter description**: Specifies whether to enable the alarm detection thread to detect fault scenarios that may occur in the database. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the alarm detection thread is enabled. +- **off** indicates that the alarm detection thread is disabled. + +**Default value**: **on** + +## connection\_alarm\_rate + +**Parameter description**: Specifies the ratio restriction on the maximum number of allowed parallel connections to the database. The maximum number of concurrent connections to the database is [max\_connections](connection-settings.md#en-us_topic_0237124695_en-us_topic_0059777636_sa723b719fa70453bb7ec27f323d41c79) x **connection\_alarm\_rate**. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** a floating point number ranging from 0.0 to 1.0 + +**Default value**: **0.9** + +## alarm\_report\_interval + +**Parameter description**: specifies the interval at which an alarm is reported. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer. The unit is s. + +**Default value:** **10** + +## alarm\_component + +**Parameter description**: Certain alarms are suppressed during alarm reporting. That is, the same alarm will not be repeatedly reported by an instance within the period specified by **alarm\_report\_interval**. Its default value is **10s**. In this case, the parameter specifies the location of the alarm component that is used to process alarm information. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +**Default value**: **/opt/huawei/snas/bin/snas\_cm\_cmd** + diff --git a/content/en/docs/Developerguide/alter-data-source.md b/content/en/docs/Developerguide/alter-data-source.md new file mode 100644 index 000000000..26df4f0a7 --- /dev/null +++ b/content/en/docs/Developerguide/alter-data-source.md @@ -0,0 +1,103 @@ +# ALTER DATA SOURCE + +## Function + +**ALTER DATA SOURCE** modifies the attributes and content of the data source. 
+ +The attributes include the name and owner. The content includes the type, version, and connection options. + +## Precautions + +- Only the initial user, system administrator, and owner have the permission to modify data sources. +- To change the owner, the new owner must be the initial user or a system administrator. +- If the **password** option is displayed, ensure that the **datasource.key.cipher** and **datasource.key.rand** files exist in the _$GAUSSHOME_**/bin** directory of each node in openGauss. If the two files do not exist, use the **gs\_guc** tool to generate them and use the **gs\_ssh** tool to release them to the _$GAUSSHOME_**/bin** directory on each node in openGauss. + +## Syntax + +``` +ALTER DATA SOURCE src_name + [TYPE 'type_str'] + [VERSION {'version_str' | NULL}] + [OPTIONS ( {[ ADD | SET | DROP ] optname ['optvalue']} [, ...] )]; +ALTER DATA SOURCE src_name RENAME TO src_new_name; +ALTER DATA SOURCE src_name OWNER TO new_owner; +``` + +## Parameter Description + +- **src\_name** + + Specifies the data source name to be modified. + + Value range: a string. It must comply with the naming convention rule. + +- **TYPE** + + Changes the original **TYPE** value of the data source to the specified value. + + Value range: an empty string or a non-empty string + +- **VERSION** + + Changes the original **VERSION** value of the data source to the specified value. + + Value range: an empty string, a non-empty string, or null + +- **OPTIONS** + + Specifies the column to be added, modified, or deleted. The value of optname should be unique. Comply with the following rules to set this parameter: + + To add a column, you can omit **ADD** and simply specify the column name, which cannot be an existing column name. + + To modify a column, specify **SET** and an existing column name. + + To delete a column, specify **DROP** and an existing column name. Do not set **optvalue**. + +- **src\_new\_name** + + Specifies the new data source name. + + Value range: a string. It must comply with the naming convention rule. + +- **new\_user** + + Specifies the new owner of an object. + + Value range: a string. It must be a valid username. + + +## Examples + +``` +-- Create an empty data source. +postgres=# CREATE DATA SOURCE ds_test1; + +-- Rename the data source. +postgres=# ALTER DATA SOURCE ds_test1 RENAME TO ds_test; + +-- Change the owner. +postgres=# CREATE USER user_test1 IDENTIFIED BY 'Gs@123456'; +postgres=# ALTER USER user_test1 WITH SYSADMIN; +postgres=# ALTER DATA SOURCE ds_test OWNER TO user_test1; + +-- Modify TYPE and VERSION. +postgres=# ALTER DATA SOURCE ds_test TYPE 'MPPDB_TYPE' VERSION 'XXX'; + +-- Add a column. +postgres=# ALTER DATA SOURCE ds_test OPTIONS (add dsn 'gaussdb', username 'test_user'); + +-- Modify a column. +postgres=# ALTER DATA SOURCE ds_test OPTIONS (set dsn 'unknown'); + +-- Delete a column. +postgres=# ALTER DATA SOURCE ds_test OPTIONS (drop username); + +-- Delete the data source and user objects. 
+postgres=# DROP DATA SOURCE ds_test; +postgres=# DROP USER user_test1; +``` + +## Helpful Links + +[CREATE DATA SOURCE](create-data-source.md) and [DROP DATA SOURCE](drop-data-source.md) + diff --git a/content/en/docs/Developerguide/alter-database.md b/content/en/docs/Developerguide/alter-database.md new file mode 100644 index 000000000..77a76473c --- /dev/null +++ b/content/en/docs/Developerguide/alter-database.md @@ -0,0 +1,142 @@ +# ALTER DATABASE + +## Function + +**ALTER DATABASE** modifies a database, including its name, owner, connection limitation, and object isolation. + +## Precautions + +- Only the owner of a database or a system administrator has the permission to run the **ALTER DATABASE** statement. Users other than system administrators may have the following permission constraints depending on the attributes to be modified: + - To modify the database name, you must have the **CREATEDB** permission. + - To modify a database owner, you must be a database owner and a member of the new owner, and have the **CREATEDB** permission. + - To change the default tablespace, you must be a database owner or a system administrator, and must have the **CREATE** permission on the new tablespace. This statement physically migrates tables and indexes in a default tablespace to a new tablespace. Note that tables and indexes outside the default tablespace are not affected. + - Only a database owner or a system administrator can modify GUC parameters for the database. + - Only database owners and system administrators can modify the object isolation attribute of a database. + +- You are not allowed to rename a database in use. To rename it, connect to another database. + +## Syntax + +- Modify the maximum number of connections to the database. + + ``` + ALTER DATABASE database_name + [ [ WITH ] CONNECTION LIMIT connlimit ]; + ``` + +- Rename the database. + + ``` + ALTER DATABASE database_name + RENAME TO new_name; + ``` + +- Change the database owner. + + ``` + ALTER DATABASE database_name + OWNER TO new_owner; + ``` + +- Change the default tablespace of the database. + + ``` + ALTER DATABASE database_name + SET TABLESPACE new_tablespace; + ``` + +- Modify the session parameter value of the database. + + ``` + ALTER DATABASE database_name + SET configuration_parameter { { TO | = } { value | DEFAULT } | FROM CURRENT }; + ``` + +- Reset the database configuration parameter. + + ``` + ALTER DATABASE database_name RESET + { configuration_parameter | ALL }; + ``` + + +- Modify the object isolation attribute of the database. + + ``` + ALTER DATABASE database_name [ WITH ] { ENABLE | DISABLE } PRIVATE OBJECT; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- To modify the object isolation attribute of a database, the database must be connected. Otherwise, the modification will fail. + >- For a new database, the object isolation attribute is disabled by default. After this attribute is enabled, common users can view only the objects \(such as tables, functions, views, and columns\) that they have the permission to access. This attribute does not take effect for administrators. After this attribute is enabled, administrators can still view all database objects. + + +## Parameter Description + +- **database\_name** + + Specifies the name of the database whose attributes are to be modified. + + Value range: a string. It must comply with the naming convention rule. 
+ +- **connlimit** + + Specifies the maximum number of concurrent connections that can be made to this database \(excluding administrators' connections\). + + Value range: The value must be an integer, preferably from 1 to 50. The default value **-1** indicates that there is no restriction on the number of concurrent connections. + +- **new\_name** + + Specifies the new name of a database. + + Value range: a string. It must comply with the naming convention rule. + +- **new\_owner** + + Specifies the new owner of a database. + + Value range: a string. It must be a valid username. + +- **new\_tablespace** + + Specifies the new default tablespace of a database. The tablespace exists in the database. The default tablespace is **pg\_default**. + + Value range: a string. It must be a valid tablespace name. + +- **configuration\_parameter** + + **value** + + Sets a specified database session parameter to a specified value. If the value is **DEFAULT** or **RESET**, the default setting is used in the new session. **OFF** closes the setting. + + Value range: a string + + - DEFAULT + - OFF + - RESET + +- **FROM CURRENT** + + Sets the value of the database based on the current connected session. + +- **RESET configuration\_parameter** + + Resets the specified database session parameter. + +- **RESET ALL** + + Resets all database session parameters. + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- Modify the default tablespace of a database by moving the table or index in the old tablespace into the new tablespace. This operation does not affect the tables or indexes in other non-default tablespaces. +>- The modified database session parameter values will take effect in the next session. + +## Examples + +See [Examples](create-database.md#en-us_topic_0237122099_en-us_topic_0059778277_s6be7b8abbb4b4aceb9dae686434d672c) in **CREATE DATABASE**. + +## Helpful Links + +[CREATE DATABASE](create-database.md) and [DROP DATABASE](drop-database.md) + diff --git a/content/en/docs/Developerguide/alter-default-privileges.md b/content/en/docs/Developerguide/alter-default-privileges.md new file mode 100644 index 000000000..1c7205175 --- /dev/null +++ b/content/en/docs/Developerguide/alter-default-privileges.md @@ -0,0 +1,141 @@ +# ALTER DEFAULT PRIVILEGES + +## Function + +**ALTER DEFAULT PRIVILEGES** allows you to set the permissions that will be applied to objects created in the future. \(It does not affect permissions granted to existing objects.\) + +## Precautions + +Only the permissions for tables \(including views\), functions, and types \(including domains\) can be altered. + +## Syntax + +``` +ALTER DEFAULT PRIVILEGES + [ FOR { ROLE | USER } target_role [, ...] ] + [ IN SCHEMA schema_name [, ...] ] + abbreviated_grant_or_revoke; +``` + +- **abbreviated\_grant\_or\_revoke** grants or revokes permissions on some objects. + + ``` + grant_on_tables_clause + | grant_on_functions_clause + | grant_on_types_clause + | revoke_on_tables_clause + | revoke_on_functions_clause + | revoke_on_types_clause + ``` + + +- **grant\_on\_tables\_clause** grants permissions on tables. + + ``` + GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES } + [, ...] | ALL [ PRIVILEGES ] } + ON TABLES + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ] + ``` + +- **grant\_on\_functions\_clause** grants permissions on functions. + + ``` + GRANT { EXECUTE | ALL [ PRIVILEGES ] } + ON FUNCTIONS + TO { [ GROUP ] role_name | PUBLIC } [, ...] 
+ [ WITH GRANT OPTION ] + ``` + +- **grant\_on\_types\_clause** grants permissions on types. + + ``` + GRANT { USAGE | ALL [ PRIVILEGES ] } + ON TYPES + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ] + ``` + +- **revoke\_on\_tables\_clause** revokes permissions on tables. + + ``` + REVOKE [ GRANT OPTION FOR ] + { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES } + [, ...] | ALL [ PRIVILEGES ] } + ON TABLES + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT | CASCADE CONSTRAINTS ] + ``` + +- **revoke\_on\_functions\_clause** revokes permissions on functions. + + ``` + REVOKE [ GRANT OPTION FOR ] + { EXECUTE | ALL [ PRIVILEGES ] } + ON FUNCTIONS + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT | CASCADE CONSTRAINTS ] + ``` + +- **revoke\_on\_types\_clause** revokes permissions on types. + + ``` + REVOKE [ GRANT OPTION FOR ] + { USAGE | ALL [ PRIVILEGES ] } + ON TYPES + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT | CASCADE CONSTRAINTS ] + ``` + + +## Parameter Description + +- **target\_role** + + Specifies the name of an existing role. If **FOR ROLE/USER** is omitted, the current role is assumed. + + Value range: an existing role name + +- **schema\_name** + + Specifies the name of an existing schema. + + **target\_role** must have the **CREATE** permission for **schema\_name**. + + Value range: an existing schema name + +- **role\_name** + + Specifies the name of an existing role to grant or revoke permissions for. + + Value range: an existing role name + + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>To drop a role for which the default permissions have been granted, reverse the changes in its default permissions or use **DROP OWNED BY** to get rid of the default permission entry for the role. + +## Example + +``` +-- Grant the SELECT permission on all the tables (and views) in tpcds to every user. +postgres=# ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds GRANT SELECT ON TABLES TO PUBLIC; + +-- Create a common user jack. +postgres=# CREATE USER jack PASSWORD 'Bigdata@123'; + +-- Grant the INSERT permission on all the tables in tpcds to the user jack. +postgres=# ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds GRANT INSERT ON TABLES TO jack; + +-- Revoke the preceding permissions. +postgres=# ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds REVOKE SELECT ON TABLES FROM PUBLIC; +postgres=# ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds REVOKE INSERT ON TABLES FROM jack; + +-- Delete user jack. +postgres=# DROP USER jack; +``` + +## Helpful Links + +[GRANT](grant.md) and [REVOKE](revoke.md) + diff --git a/content/en/docs/Developerguide/alter-directory.md b/content/en/docs/Developerguide/alter-directory.md new file mode 100644 index 000000000..7ccf16070 --- /dev/null +++ b/content/en/docs/Developerguide/alter-directory.md @@ -0,0 +1,41 @@ +# ALTER DIRECTORY + +## Function + +**ALTER DIRECTORY** modifies a directory. + +## Precautions + +- Currently, only the directory owner can be changed. +- The owner can only be a user with the **sysadmin** permission. + +## Syntax + +``` +ALTER DIRECTORY directory_name + OWNER TO new_owner; +``` + +## Parameter Description + +**directory\_name** + +Specifies the name of a directory to be modified. The value must be an existing directory name. + +## Examples + +``` +-- Create a directory. +postgres=# CREATE OR REPLACE DIRECTORY dir as '/tmp/'; + +-- Change the owner of the directory. +postgres=# ALTER DIRECTORY dir OWNER TO system; + +-- Delete the foreign table. 
+postgres=# DROP DIRECTORY dir; +``` + +## Helpful Links + +[CREATE DIRECTORY](create-directory.md) and [DROP DIRECTORY](drop-directory.md) + diff --git a/content/en/docs/Developerguide/alter-function.md b/content/en/docs/Developerguide/alter-function.md new file mode 100644 index 000000000..b1b841270 --- /dev/null +++ b/content/en/docs/Developerguide/alter-function.md @@ -0,0 +1,190 @@ +# ALTER FUNCTION + +## Function + +**ALTER FUNCTION** modifies the attributes of a customized function. + +## Precautions + +Only the owner of the function or a system administrator has the permission to run this statement. If a function involves operations on temporary tables, **ALTER FUNCTION** cannot be used. + +## Syntax + +- Modify the additional parameters of the customized function. + + ``` + ALTER FUNCTION function_name ( [ { [ argmode ] [ argname ] argtype} [, ...] ] ) + action [ ... ] [ RESTRICT ]; + ``` + + The syntax of the **action** clause is as follows: + + ``` + {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT} + | {IMMUTABLE | STABLE | VOLATILE} + | {SHIPPABLE | NOT SHIPPABLE} + | {NOT FENCED | FENCED} + | [ NOT ] LEAKPROOF + | { [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER } + | AUTHID { DEFINER | CURRENT_USER } + | COST execution_cost + | ROWS result_rows + | SET configuration_parameter { { TO | = } { value | DEFAULT }| FROM CURRENT} + | RESET {configuration_parameter | ALL} + ``` + +- Rename the customized function. + + ``` + ALTER FUNCTION funname ( [ { [ argmode ] [ argname ] argtype} [, ...] ] ) + RENAME TO new_name; + ``` + +- Change the owner of the customized function. + + ``` + ALTER FUNCTION funname ( [ { [ argmode ] [ argname ] argtype} [, ...] ] ) + OWNER TO new_owner; + ``` + +- Modify the schema of the customized function. + + ``` + ALTER FUNCTION funname ( [ { [ argmode ] [ argname ] argtype} [, ...] ] ) + SET SCHEMA new_schema; + ``` + + +## Parameter Description + +- **function\_name** + + Specifies the name of the function to be modified. + + Value range: an existing function name + +- **argmode** + + Specifies whether a parameter is an input or output parameter. + + Value range: **IN**, **OUT**, and **IN OUT** + +- **argname** + + Parameter name. + + Value range: a string. It must comply with the naming convention rule. + +- **argtype** + + Parameter type. + + Value range: a valid type. For details, see [Data Types](data-types.md). + +- **CALLED ON NULL INPUT** + + Declares that some parameters of the function can be invoked in normal mode if the parameter values are null. Omitting this parameter is the same as specifying it. + +- **RETURNS NULL ON NULL INPUT** + + **STRICT** + + Specifies that the function always returns null whenever any of its parameters is null. If **STRICT** is specified, the function will not be executed when there are null parameters; instead a null result is assumed automatically. + + **RETURNS NULL ON NULL INPUT** and **STRICT** have the same functions. + +- **IMMUTABLE** + + Specifies that the function always returns the same result if the parameter values are the same. + +- **STABLE** + + Specifies that the function cannot modify the database, and that within a single table scan it will consistently return the same result for the same parameter value, but its result varies by SQL statements. + +- **VOLATILE** + + Specifies that the function value can change in a single table scan and no optimization is performed. 
+ +- **LEAKPROOF** + + Specifies that the function has no side effect and the parameter contains only the return value. **LEAKPROOF** can be set only by a system administrator. + +- **EXTERNAL** + + \(Optional\) This keyword exists for SQL compatibility. It applies to all functions, not only external functions. + +- **SECURITY INVOKER** + + **AUTHID CURRENT\_USER** + + Specifies that the function will be executed with the permissions of the user who invokes it. Omitting this parameter is the same as specifying it. + + **SECURITY INVOKER** and **AUTHID CURRENT\_USER** have the same function. + +- **SECURITY DEFINER** + + **AUTHID DEFINER** + + Specifies that the function will be executed with the permissions of the user who created it. + + **AUTHID DEFINER** and **SECURITY DEFINER** have the same function. + +- **COST execution\_cost** + + Estimates the execution cost of a function. + + The unit of **execution\_cost** is **cpu\_operator\_cost**. + + Value range: a positive integer + +- **ROWS result\_rows** + + Estimates the number of rows returned by the function. This is only allowed when the function is declared to return a set. + + Value range: a positive number. The default value is **1000**. + +- **configuration\_parameter** + - **value** + + Sets a specified database session parameter to a specified value. If the value is **DEFAULT** or **RESET**, the default setting is used in the new session. **OFF** closes the setting. + + Value range: a string + + - DEFAULT + - OFF + - RESET + + - **DEFAULT** + + Specifies the default value. + + - **FROM CURRENT** + + Uses the value of **configuration\_parameter** of the current session. + + +- **new\_name** + + Specifies the new name of a function. To change the schema of a function, you must have the **CREATE** permission on the new schema. + + Value range: a string. It must comply with the naming convention rule. + +- **new\_owner** + + Specifies the new owner of a function. To change the owner of a function, the new owner must have the **CREATE** permission on the schema to which the function belongs. + + Value range: an existing user role + +- **new\_schema** + + Specifies the new schema of a function. + + Value range: an existing schema + + +## Examples + +See [Examples](create-function.md#en-us_topic_0237122104_en-us_topic_0059778837_scc61c5d3cc3e48c1a1ef323652dda821) in **CREATE FUNCTION**. + +## Helpful Links + +[CREATE FUNCTION](create-function.md) and [DROP FUNCTION](drop-function.md) + diff --git a/content/en/docs/Developerguide/alter-group.md b/content/en/docs/Developerguide/alter-group.md new file mode 100644 index 000000000..0fa17ac90 --- /dev/null +++ b/content/en/docs/Developerguide/alter-group.md @@ -0,0 +1,56 @@ +# ALTER GROUP + +## Function + +**ALTER GROUP** modifies the attributes of a user group. + +## Precautions + +**ALTER GROUP** is an alias for **ALTER ROLE**. It is not standard SQL and is not recommended; use **ALTER ROLE** directly instead. + +## Syntax + +- Add users to a group. + + ``` + ALTER GROUP group_name + ADD USER user_name [, ... ]; + ``` + + +- Remove users from a group. + + ``` + ALTER GROUP group_name + DROP USER user_name [, ... ]; + ``` + +- Change the name of the group. + + ``` + ALTER GROUP group_name + RENAME TO new_name; + ``` + + +## Parameter Description + +See [Parameter Description](alter-role.md#en-us_topic_0237122068_en-us_topic_0059778744_s50961af6143d4aafaf8fa02febbbf331) in **ALTER ROLE**. + +## Example + +``` +-- Add users to a group.
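+-- Assumed setup (hypothetical, for illustration): the group super_users and the users lche and jim must already exist, e.g.:
+-- CREATE ROLE super_users PASSWORD 'Bigdata@123';
+-- CREATE USER lche PASSWORD 'Bigdata@123';
+-- CREATE USER jim PASSWORD 'Bigdata@123';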
+postgres=# ALTER GROUP super_users ADD USER lche, jim; + +-- Remove users from a group. +postgres=# ALTER GROUP super_users DROP USER jim; + +-- Change the name of the group. +postgres=# ALTER GROUP super_users RENAME TO normal_users; +``` + +## Helpful Links + +[ALTER GROUP](alter-group.md), [DROP GROUP](drop-group.md), and [ALTER ROLE](alter-role.md) + diff --git a/content/en/docs/Developerguide/alter-index.md b/content/en/docs/Developerguide/alter-index.md new file mode 100644 index 000000000..c405496af --- /dev/null +++ b/content/en/docs/Developerguide/alter-index.md @@ -0,0 +1,165 @@ +# ALTER INDEX + +## Function + +**ALTER INDEX** modifies the definition of an existing index. + +It has the following forms: + +- IF EXISTS + + Sends a notice instead of an error if the specified index does not exist. + +- RENAME TO + + Changes only the name of the index. The stored data is not affected. + +- SET TABLESPACE + + This option changes the index tablespace to the specified tablespace and moves index-related data files to the new tablespace. + +- SET \( \{ STORAGE\_PARAMETER = value \} \[, ...\] \) + + Changes one or more index-method-specific storage parameters of an index. Note that the index content will not be modified immediately by this statement. You may need to use **REINDEX** to recreate the index based on different parameters to achieve the expected effect. + +- RESET \( \{ storage\_parameter \} \[, ...\] \) + + Resets one or more index-method-specific storage parameters of an index to the default value. Similar to the **SET** statement, **REINDEX** may be used to completely update the index. + +- \[ MODIFY PARTITION index\_partition\_name \] UNUSABLE + + Sets the indexes on a table or index partition to be unavailable. + +- REBUILD \[ PARTITION index\_partition\_name \] + + Rebuilds indexes on a table or an index partition. + +- RENAME PARTITION + + Renames an index partition. + +- MOVE PARTITION + + Modifies the tablespace to which an index partition belongs. + + +## Precautions + +Only the index owner or a system administrator can run this statement. + +## Syntax + +- Rename a table index. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + RENAME TO new_name; + ``` + + +- Change the tablespace to which a table index belongs. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + SET TABLESPACE tablespace_name; + ``` + + +- Modify the storage parameter of a table index. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + SET ( {storage_parameter = value} [, ... ] ); + ``` + + +- Reset the storage parameter of a table index. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + RESET ( storage_parameter [, ... ] ) ; + ``` + + +- Set a table index or an index partition to be unavailable. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + [ MODIFY PARTITION index_partition_name ] UNUSABLE; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The syntax cannot be used for column-store tables. + + +- Rebuild a table index or index partition. + + ``` + ALTER INDEX index_name + REBUILD [ PARTITION index_partition_name ]; + ``` + + +- Rename an index partition. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + RENAME PARTITION index_partition_name TO new_index_partition_name; + ``` + + +- Modify the tablespace to which an index partition belongs. + + ``` + ALTER INDEX [ IF EXISTS ] index_name + MOVE PARTITION index_partition_name TABLESPACE new_tablespace; + ``` + + +## Parameter Description + +- **index\_name** + + Specifies the index name to be modified. 
+ +- **new\_name** + + Specifies the new name of the index. + + Value range: a string. It must comply with the naming convention rule. + +- **tablespace\_name** + + Specifies the tablespace name. + + Value range: an existing tablespace name + +- **storage\_parameter** + + Specifies the name of an index-method-specific parameter. + +- **value** + + Specifies the new value for an index-method-specific storage parameter. This might be a number or a word depending on the parameter. + +- **new\_index\_partition\_name** + + Specifies the new name of the index partition. + +- **index\_partition\_name** + + Specifies the name of an index partition. + +- **new\_tablespace** + + Specifies a new tablespace. + + +## Examples + +See [Examples](create-index.md#en-us_topic_0237122106_en-us_topic_0059777455_s985289833081489e9d77c485755bd362) in **CREATE INDEX**. + +## Helpful Links + +[CREATE INDEX](create-index.md), [DROP INDEX](drop-index.md), and [REINDEX](reindex.md) + diff --git a/content/en/docs/Developerguide/alter-large-object.md b/content/en/docs/Developerguide/alter-large-object.md new file mode 100644 index 000000000..8a0997975 --- /dev/null +++ b/content/en/docs/Developerguide/alter-large-object.md @@ -0,0 +1,36 @@ +# ALTER LARGE OBJECT + +## Function + +**ALTER LARGE OBJECT** changes the owner of a large object. + +## Precautions + +Only a system administrator or the owner of the to-be-modified large object can run **ALTER LARGE OBJECT**. + +## Syntax + +``` +ALTER LARGE OBJECT large_object_oid + OWNER TO new_owner; +``` + +## Parameter Description + +- **large\_object\_oid** + + Specifies the OID of the large object to be modified. + + Value range: an existing large object name + +- **OWNER TO new\_owner** + + Specifies the new owner of an object. + + Value range: an existing username or role name + + +## Examples + +None + diff --git a/content/en/docs/Developerguide/alter-role.md b/content/en/docs/Developerguide/alter-role.md new file mode 100644 index 000000000..39cd2d3f7 --- /dev/null +++ b/content/en/docs/Developerguide/alter-role.md @@ -0,0 +1,118 @@ +# ALTER ROLE + +## Function + +**ALTER ROLE** modifies role attributes. + +## Precautions + +None + +## Syntax + +- Modify the permissions of a role. + + ``` + ALTER ROLE role_name [ [ WITH ] option [ ... ] ]; + ``` + + The **option** clause for granting permissions is as follows: + + ``` + {CREATEDB | NOCREATEDB} + | {CREATEROLE | NOCREATEROLE} + | {INHERIT | NOINHERIT} + | {AUDITADMIN | NOAUDITADMIN} + | {SYSADMIN | NOSYSADMIN} + | {MONADMIN | NOMONADMIN} + | {OPRADMIN | NOOPRADMIN} + | {POLADMIN | NOPOLADMIN} + | {USEFT | NOUSEFT} + | {LOGIN | NOLOGIN} + | {REPLICATION | NOREPLICATION} + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | CONNECTION LIMIT connlimit + | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password' + | [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY 'password' [ REPLACE 'old_password' ] + | [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' | DISABLE } + | [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' ] | DISABLE } + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | PERM SPACE 'spacelimit' + | ACCOUNT { LOCK | UNLOCK } + | PGUSER + ``` + +- Rename a role. + + ``` + ALTER ROLE role_name + RENAME TO new_name; + ``` + +- Set parameters for a role. + + ``` + ALTER ROLE role_name [ IN DATABASE database_name ] + SET configuration_parameter {{ TO | = } { value | DEFAULT } | FROM CURRENT}; + ``` + +- Reset parameters for a role. 
+ + ``` + ALTER ROLE role_name + [ IN DATABASE database_name ] RESET {configuration_parameter|ALL}; + ``` + + +## Parameter Description + +- **role\_name** + + Specifies a role name. + + Value range: an existing username + +- **IN DATABASE database\_name** + + Modifies the parameters of a role in a specified database. + +- **SET configuration\_parameter** + + Sets parameters for a role. Session parameters modified by **ALTER ROLE** apply to a specified role and take effect in the next session triggered by the role. + + Value range: + + For details about the values of **configuration\_parameter** and **value**, see [SET](set.md). + + **DEFAULT**: clears the value of **configuration\_parameter**. **configuration\_parameter** will inherit the default value of the new session generated for the role. + + **FROM CURRENT**: uses the value of **configuration\_parameter** of the current session. + +- **RESET configuration\_parameter/ALL** + + Clears the value of **configuration\_parameter**. The statement has the same effect as that of **SET configuration\_parameter TO DEFAULT**. + + Value range: **ALL** indicates that the values of all parameters are cleared. + +- **ACCOUNT LOCK | ACCOUNT UNLOCK** + - **ACCOUNT LOCK**: locks an account to forbid login to databases. + - **ACCOUNT UNLOCK**: unlocks an account to allow login to databases. + +- **PGUSER** + + In the current version, the **PGUSER** attribute of a role cannot be modified. + + +For details about other parameters, see [Parameter Description](create-role.md#en-us_topic_0237122112_en-us_topic_0059778189_s5a43ec5742a742089e2c302063de7fe4) in **CREATE ROLE**. + +## Example + +See [Example:](create-role.md#en-us_topic_0237122112_en-us_topic_0059778189_s0dea2f90b8474387aff0ab3f366a611e) in **CREATE ROLE**. + +## Helpful Links + +[CREATE ROLE](create-role.md), [DROP ROLE](drop-role.md), and [SET](set.md) + diff --git a/content/en/docs/Developerguide/alter-row-level-security-policy.md b/content/en/docs/Developerguide/alter-row-level-security-policy.md new file mode 100644 index 000000000..19ad4db92 --- /dev/null +++ b/content/en/docs/Developerguide/alter-row-level-security-policy.md @@ -0,0 +1,107 @@ +# ALTER ROW LEVEL SECURITY POLICY + +## Function + +**ALTER ROW LEVEL SECURITY POLICY** modifies an existing row-level access control policy, including the policy name and the users and expressions affected by the policy. + +## Precautions + +Only the table owner or a system administrator can perform this operation. + +## Syntax + +``` +ALTER [ ROW LEVEL SECURITY ] POLICY [ IF EXISTS ] policy_name ON table_name RENAME TO new_policy_name; + +ALTER [ ROW LEVEL SECURITY ] POLICY policy_name ON table_name + [ TO { role_name | PUBLIC } [, ...] ] + [ USING ( using_expression ) ]; +``` + +## Parameter Description + +- policy\_name + + Specifies the name of a row-level access control policy. + +- table\_name + + Specifies the name of a table to which a row-level access control policy is applied. + +- new\_policy\_name + + Specifies the new name of a row-level access control policy. + +- role\_name + + Specifies names of users affected by a row-level access control policy. PUBLIC indicates that the row-level access control policy will affect all users. + +- using\_expression + + Specifies an expression defined for a row-level access control policy. The return value is of the boolean type. + + +## Examples + +``` +-- Create the data table all_data. 
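+-- Assumed setup (hypothetical, for illustration): the users alice and bob referenced later in this example must already exist,
+-- e.g. CREATE USER alice PASSWORD 'Bigdata@123'; and CREATE USER bob PASSWORD 'Bigdata@123';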
+postgres=# CREATE TABLE all_data(id int, role varchar(100), data varchar(100)); + +--Create a row-level access control policy to specify that the current user can view only their own data. +postgres=# CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER); +postgres=# \d+ all_data + Table "public.all_data" + Column | Type | Modifiers | Storage | Stats target | Description +--------+------------------------+-----------+----------+--------------+------------- + id | integer | | plain | | + role | character varying(100) | | extended | | + data | character varying(100) | | extended | | +Row Level Security Policies: + POLICY "all_data_rls" + USING (((role)::name = "current_user"())) +Has OIDs: no +Location Nodes: ALL DATANODES +Options: orientation=row, compression=no + +-- Change the name of the all_data_rls policy. +postgres=# ALTER ROW LEVEL SECURITY POLICY all_data_rls ON all_data RENAME TO all_data_new_rls; + +-- Change the users affected by the row-level access control policy. +postgres=# ALTER ROW LEVEL SECURITY POLICY all_data_new_rls ON all_data TO alice, bob; +postgres=# \d+ all_data + Table "public.all_data" + Column | Type | Modifiers | Storage | Stats target | Description +--------+------------------------+-----------+----------+--------------+------------- + id | integer | | plain | | + role | character varying(100) | | extended | | + data | character varying(100) | | extended | | +Row Level Security Policies: + POLICY "all_data_new_rls" + TO alice,bob + USING (((role)::name = "current_user"())) +Has OIDs: no +Location Nodes: ALL DATANODES +Options: orientation=row, compression=no, enable_rowsecurity=true + +-- Modify the expression defined for the access control policy. +postgres=# ALTER ROW LEVEL SECURITY POLICY all_data_new_rls ON all_data USING (id > 100 AND role = current_user); +postgres=# \d+ all_data + Table "public.all_data" + Column | Type | Modifiers | Storage | Stats target | Description +--------+------------------------+-----------+----------+--------------+------------- + id | integer | | plain | | + role | character varying(100) | | extended | | + data | character varying(100) | | extended | | +Row Level Security Policies: + POLICY "all_data_new_rls" + TO alice,bob + USING (((id > 100) AND ((role)::name = "current_user"()))) +Has OIDs: no +Location Nodes: ALL DATANODES +Options: orientation=row, compression=no, enable_rowsecurity=true +``` + +## Helpful Links + +[CREATE ROW LEVEL SECURITY POLICY](create-row-level-security-policy.md) and [DROP ROW LEVEL SECURITY POLICY](drop-row-level-security-policy.md) + diff --git a/content/en/docs/Developerguide/alter-schema.md b/content/en/docs/Developerguide/alter-schema.md new file mode 100644 index 000000000..fc9db0aac --- /dev/null +++ b/content/en/docs/Developerguide/alter-schema.md @@ -0,0 +1,76 @@ +# ALTER SCHEMA + +## Function + +**ALTER SCHEMA** modifies schema properties. + +## Precautions + +Only the owner of a schema or a system administrator has the permission to run the **ALTER SCHEMA** statement. + +## Syntax + +- Rename a schema. + + ``` + ALTER SCHEMA schema_name + RENAME TO new_name; + ``` + +- Change the owner of a schema. + + ``` + ALTER SCHEMA schema_name + OWNER TO new_owner; + ``` + + +## Parameter Description + +- **schema\_name** + + Specifies the name of an existing schema. + + Value range: an existing schema name + +- **RENAME TO new\_name** + + Renames a schema. + + **new\_name**: new name of the schema. + + Value range: a string. 
It must comply with the naming convention rule. + +- **OWNER TO new\_owner** + + Changes the owner of a schema. To do this as a non-administrator, you must be a direct or indirect member of the new owning role, and that role must have **CREATE** permission in the database. + + **new\_owner**: new owner of the schema. + + Value range: an existing username or role name + + +## Examples + +``` +-- Create the ds schema. +postgres=# CREATE SCHEMA ds; + +-- Rename the current schema ds to ds_new. +postgres=# ALTER SCHEMA ds RENAME TO ds_new; + +-- Create user jack. +postgres=# CREATE USER jack PASSWORD 'Bigdata@123'; + +-- Change the owner of ds_new to jack. +postgres=# ALTER SCHEMA ds_new OWNER TO jack; + +-- Delete user jack and schema ds_new. +postgres=# DROP SCHEMA ds_new; +postgres=# DROP USER jack; +``` + +## Helpful Links + +[CREATE SCHEMA](create-schema.md) and [DROP SCHEMA](drop-schema.md) + diff --git a/content/en/docs/Developerguide/alter-sequence.md b/content/en/docs/Developerguide/alter-sequence.md new file mode 100644 index 000000000..f0c444992 --- /dev/null +++ b/content/en/docs/Developerguide/alter-sequence.md @@ -0,0 +1,76 @@ +# ALTER SEQUENCE + +## Function + +**ALTER SEQUENCE** modifies the parameters of an existing sequence. + +## Precautions + +- You must be the owner of the sequence to use **ALTER SEQUENCE**. +- In the current version, you can modify only the owner, owning column, and maximum value. To modify other parameters, delete the sequence and create it again. Then, use the **Setval** function to restore parameter values. +- **ALTER SEQUENCE MAXVALUE** cannot be used in transactions, functions, and stored procedures. +- After the maximum value of a sequence is changed, the cache of the sequence in all sessions is cleared. +- The **ALTER SEQUENCE** statement blocks the invoking of **nextval**, **setval**, **currval**, and **lastval**. + +## Syntax + +Change the owning column of a sequence. + +``` +ALTER SEQUENCE [ IF EXISTS ] name + [MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE] + [ OWNED BY { table_name.column_name | NONE } ] ; +``` + +Change the owner of a sequence. + +``` +ALTER SEQUENCE [ IF EXISTS ] name OWNER TO new_owner; +``` + +## Parameter Description + +- name + + Specifies the name of the sequence to be modified. + +- IF EXISTS + + Sends a notice instead of an error when you are modifying a nonexisting sequence. + +- OWNED BY + + Associates a sequence with a specified column included in a table. In this way, the sequence will be deleted when you delete its associated column or the table where the column belongs to. + + If the sequence has been associated with another table before you use this option, the new association will overwrite the old one. + + The associated table and sequence must be owned by the same user and in the same schema. + + If **OWNED BY NONE** is used, all existing associations will be deleted. + +- new\_owner + + Specifies the username of the new owner of the sequence. To change the owner, you must also be a direct or indirect member of the new role, and this role must have **CREATE** permission on the sequence's schema. + + +## Examples + +``` +-- Create an ascending sequence named serial, which starts from 101. +postgres=# CREATE SEQUENCE serial START 101; + +-- Create a table and specify default values for the sequence. +postgres=# CREATE TABLE T1(C1 bigint default nextval('serial')); + +-- Change the owning column of serial to T1.C1. +postgres=# ALTER SEQUENCE serial OWNED BY T1.C1; + +-- Delete the sequence. 
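+-- CASCADE is presumably needed here because the default value of T1.C1 still references the sequence; it drops that default along with the sequence.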
+postgres=# DROP SEQUENCE serial cascade; +postgres=# DROP TABLE T1; +``` + +## Helpful Links + +[CREATE SEQUENCE](create-sequence.md) and [DROP SEQUENCE](drop-sequence.md) + diff --git a/content/en/docs/Developerguide/alter-session.md b/content/en/docs/Developerguide/alter-session.md new file mode 100644 index 000000000..9f1cdab92 --- /dev/null +++ b/content/en/docs/Developerguide/alter-session.md @@ -0,0 +1,85 @@ +# ALTER SESSION + +## Function + +**ALTER SESSION** defines or modifies the conditions or parameters that affect the current session. Modified session parameters are kept until the current session is disconnected. + +## Precautions + +- If the **START TRANSACTION** statement is not executed before the **SET TRANSACTION** statement, the transaction is ended instantly and the statement does not take effect. +- You can use the **transaction\_mode\(s\)** method declared in the **START TRANSACTION** statement to avoid using the **SET TRANSACTION** statement. + +## Syntax + +- Set transaction parameters of a session. + + ``` + ALTER SESSION SET [ SESSION CHARACTERISTICS AS ] TRANSACTION + { ISOLATION LEVEL { READ COMMITTED } | { READ ONLY | READ WRITE } } [, ...] ; + ``` + +- Set other running parameters of a session. + + ``` + ALTER SESSION SET + {{config_parameter { { TO | = } { value | DEFAULT } + | FROM CURRENT }} + | TIME ZONE time_zone + | CURRENT_SCHEMA schema + | NAMES encoding_name + | ROLE role_name PASSWORD 'password' + | SESSION AUTHORIZATION { role_name PASSWORD 'password' | DEFAULT } + | XML OPTION { DOCUMENT | CONTENT } + } ; + ``` + + +## Parameter Description + +For details about the descriptions of parameters related to **ALTER SESSION**, see [Parameter Description](set.md#en-us_topic_0237122186_en-us_topic_0059779029_s39823c7ebd854a9f9c761b3a32b1c3c3) of the SET syntax. + +## Examples + +``` +-- Create the ds schema. +postgres=# CREATE SCHEMA ds; + +-- Set the search path of the schema. +postgres=# SET SEARCH_PATH TO ds, public; + +-- Set the time/date type to the traditional postgres format (date before month). +postgres=# SET DATESTYLE TO postgres, dmy; + +-- Set the character code of the current session to UTF8. +postgres=# ALTER SESSION SET NAMES 'UTF8'; + +-- Set the time zone to Berkeley of California. +postgres=# SET TIME ZONE 'PST8PDT'; + +-- Set the time zone to Italy. +postgres=# SET TIME ZONE 'Europe/Rome'; + +-- Set the current schema. +postgres=# ALTER SESSION SET CURRENT_SCHEMA TO tpcds; + +-- Set XML OPTION to DOCUMENT. +postgres=# ALTER SESSION SET XML OPTION DOCUMENT; + +-- Create the role joe and set it as the session role. +postgres=# CREATE ROLE joe WITH PASSWORD 'Bigdata@123'; +postgres=# ALTER SESSION SET SESSION AUTHORIZATION joe PASSWORD 'Bigdata@123'; + +-- Switch to the default user. +postgres=> ALTER SESSION SET SESSION AUTHORIZATION default; + +-- Delete the ds schema. +postgres=# DROP SCHEMA ds; + +-- Delete the role joe. +postgres=# DROP ROLE joe; +``` + +## Helpful Links + +[SET](set.md) + diff --git a/content/en/docs/Developerguide/alter-synonym.md b/content/en/docs/Developerguide/alter-synonym.md new file mode 100644 index 000000000..0dcf779e7 --- /dev/null +++ b/content/en/docs/Developerguide/alter-synonym.md @@ -0,0 +1,58 @@ +# ALTER SYNONYM + +## Function + +**ALTER SYNONYM** modifies the attributes of the **SYNONYM** object. + +## Precautions + +- Currently, only the owner of the **SYNONYM** object can be changed. +- Only the system administrator has the permission to modify the owner of the **SYNONYM** object. 
+- The new owner must have the **CREATE** permission on the schema where the **SYNONYM** object resides. + +## Syntax + +``` +ALTER SYNONYM synonym_name + OWNER TO new_owner; +``` + +## Parameter Description + +- **synonym** + + Specifies the name of the synonym to be modified, which can contain the schema name. + + Value range: a string. It must comply with the naming convention rule. + + +- **new\_owner** + + Specifies the new owner of the **SYNONYM** object. + + Value range: a string. It must be a valid username. + + +## Examples + +``` +-- Create synonym t1. +postgres=# CREATE OR REPLACE SYNONYM t1 FOR ot.t1; + +-- Create a user u1. +postgres=# CREATE USER u1 PASSWORD 'user@111'; + +-- Change the owner of synonym t1 to u1. +postgres=# ALTER SYNONYM t1 OWNER TO u1; + +-- Delete synonym t1. +postgres=# DROP SYNONYM t1; + +-- Delete user u1. +postgres=# DROP USER u1; +``` + +## Helpful Links + +[CREATE DATA SOURCE](create-data-source.md) and [DROP DATA SOURCE](drop-data-source.md) + diff --git a/content/en/docs/Developerguide/alter-system-kill-session.md b/content/en/docs/Developerguide/alter-system-kill-session.md new file mode 100644 index 000000000..d092da95c --- /dev/null +++ b/content/en/docs/Developerguide/alter-system-kill-session.md @@ -0,0 +1,50 @@ +# ALTER SYSTEM KILL SESSION + +## Function + +**ALTER SYSTEM KILL SESSION** ends a session. + +## Precautions + +None + +## Syntax + +``` +ALTER SYSTEM KILL SESSION 'session_sid, serial' [ IMMEDIATE ]; +``` + +## Parameter Description + +- **session\_sid, serial** + + Specifies the SID and SERIAL of a session \(To obtain the values, see the example.\) + +- **IMMEDIATE** + + Specifies that a session will be ended instantly after the statement is executed. + + +## Example + +``` +-- Query session information. +postgres=# +SELECT sa.sessionid AS sid,0::integer AS serial#,ad.rolname AS username FROM pg_stat_get_activity(NULL) AS sa +LEFT JOIN pg_authid ad ON(sa.usesysid = ad.oid)WHERE sa.application_name <> 'JobScheduler'; + sid | serial# | username +-----------------+---------+---------- + 140131075880720 | 0 | omm + 140131025549072 | 0 | omm + 140131073779472 | 0 | omm + 140131071678224 | 0 | omm + 140131125774096 | 0 | + 140131127875344 | 0 | + 140131113629456 | 0 | + 140131094742800 | 0 | +(8 rows) + +-- End the session whose SID is 140131075880720. +postgres=# ALTER SYSTEM KILL SESSION '140131075880720,0' IMMEDIATE; +``` + diff --git a/content/en/docs/Developerguide/alter-table-partition.md b/content/en/docs/Developerguide/alter-table-partition.md new file mode 100644 index 000000000..594784282 --- /dev/null +++ b/content/en/docs/Developerguide/alter-table-partition.md @@ -0,0 +1,244 @@ +# ALTER TABLE PARTITION + +## Function + +**ALTER TABLE PARTITION** modifies table partition, including adding, deleting, splitting, merging partitions, and modifying partition attributes. + +## Precautions + +- The tablespace for the added partition cannot be **PG\_GLOBAL**. +- The name of the added partition must be different from names of existing partitions in the partition table. +- The partition key of the added partition must be the same type as that of the partition table. The key value of the added partition must exceed the upper limit of the last partition range. +- If the number of partitions in the target partition table has reached the maximum \(32767\), partitions cannot be added. + +- If a partition table has only one partition, the partition cannot be deleted. +- Use **PARTITION FOR\(\)** to choose partitions. 
The number of specified values in the brackets should be the same as the column number in customized partition, and they must be consistent. +- The **Value** partition table does not support the **Alter Partition** operation. +- Column-store tables and row-store tables cannot be partitioned. + +## Syntax + +- Modify the syntax of the table partition. + + ``` + ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )} + action [, ... ]; + ``` + + **action** indicates the following clauses for maintaining partitions. For the partition continuity when multiple clauses are used for partition maintenance, openGauss does **DROP PARTITION** and then **ADD PARTITION**, and finally runs the rest clauses in sequence. + + ``` + move_clause | + exchange_clause | + row_clause | + merge_clause | + modify_clause | + split_clause | + add_clause | + drop_clause + ``` + + - The **move\_clause** syntax is used to move the partition to a new tablespace. + + ``` + MOVE PARTITION { partion_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename + ``` + + - The **exchange\_clause** syntax is used to move the data from a general table to a specified partition. + + ``` + EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } + WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )} + [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ] + ``` + + The ordinary table and partition whose data is to be exchanged must meet the following requirements: + + - The number of columns of the ordinary table is the same as that of the partition, and their information should be consistent, including: column name, data type, constraint, collation information, storage parameter, and compression information. + - The compression information of the ordinary table and partition should be consistent. + - The distribution key information of the ordinary table and partition should be consistent. + - The number and information of indexes of the ordinary table and partition should be consistent. + - The number and information of constraints of the ordinary table and partition should be consistent. + - The ordinary table cannot be a temporary table. + + When the exchange is done, the data and tablespace of the ordinary table and partition are exchanged. The statistics of the ordinary table and partition are no longer inaccurate after the exchange, and they should be analyzed again. + + - The **row\_clause** syntax is used to set row movement of a partitioned table. + + ``` + { ENABLE | DISABLE } ROW MOVEMENT + ``` + + - The **merge\_clause** syntax is used to merge partitions into one. + + ``` + MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name + [ TABLESPACE tablespacename ] + ``` + + - The **modify\_clause** syntax is used to set whether a partition index is usable. + + ``` + MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES } + ``` + + - The **split\_clause** syntax is used to split one partition into partitions. + + ``` + SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause } + ``` + + - The **split\_point\_clause** syntax is used to specify a split point. 
+ + ``` + AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] ) + ``` + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- Column-store tables and row-store tables cannot be partitioned. + >- The size of the split point should be in the range of partition keys of the partition to be split. The split point can only split one partition into two new partitions. + + - The **no\_split\_point\_clause** syntax does not specify a split point. + + ``` + INTO { ( partition_less_than_item [, ...] ) | ( partition_start_end_item [, ...] ) } + ``` + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- The first new partition key specified by **partition\_less\_than\_item** should be greater than that of the previously split partition \(if any\), and the last partition key specified by **partition\_item\_clause** should equal that of the partition being split. + >- The first new partition key specified by **partition\_start\_end\_item** should equal that of the former partition \(if any\), and the last partition key specified by **partition\_start\_end\_item** should equal that of the partition being split. + >- **partition\_less\_than\_item** supports a maximum of 4 partition keys, while **partition\_start\_end\_item** supports only one partition key. For details about the supported data types, see [PARTITION BY RANGE\(parti...](create-table-partition.md#en-us_topic_0237122119_en-us_topic_0059777586_l00efc30fe63048ffa2ef68c5b18bb455). + >- **partition\_less\_than\_item** and **partition\_start\_end\_item** cannot be used in the same statement. + + + - The syntax of **partition\_less\_than\_item** is as follows: + + ``` + PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] ) + [ TABLESPACE tablespacename ] + ``` + + - The syntax of **partition\_start\_end\_item** is as follows. For details about the constraints, see [partition\_start\_end\_item syntax](create-table-partition.md#en-us_topic_0237122119_li2094151861116). + + ``` + PARTITION partition_name { + {START(partition_value) END (partition_value) EVERY (interval_value)} | + {START(partition_value) END ({partition_value | MAXVALUE})} | + {START(partition_value)} | + {END({partition_value | MAXVALUE})} + } [TABLESPACE tablespace_name] + + ``` + + + - The **add\_clause** syntax is used to add one or more partitions to a specified partitioned table. + + ``` + ADD {partition_less_than_item | partition_start_end_item} + ``` + + - The **drop\_clause** syntax is used to remove a partition from a specified partitioned table. + + ``` + DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) } + ``` + + +- The syntax for modifying the name of a partition is as follows: + + ``` + ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )} + RENAME PARTITION { partion_name | FOR ( partition_value [, ...] ) } TO partition_new_name; + ``` + + +## Parameter Description + +- **table\_name** + + Specifies the name of a partitioned table. + + Value range: an existing table name + +- **partition\_name** + + Specifies the name of a partition. + + Value range: an existing table name + +- **tablespacename** + + Specifies which tablespace the partition moves to. + + Value range: an existing table name + +- **partition\_value** + + Specifies the key value of a partition. + + The value specified by **PARTITION FOR \( partition\_value \[, ...\] \)** can uniquely identify a partition. 
+ + Value range: partition keys for the partition to be renamed + +- **UNUSABLE LOCAL INDEXES** + + Sets all the indexes unusable in the partition. + +- **REBUILD UNUSABLE LOCAL INDEXES** + + Rebuilds all the indexes in the partition. + +- **ENABLE/DISABLE ROW MOVEMET** + + Sets row movement. + + If the tuple value is updated on the partition key during the **UPDATE** action, the partition where the tuple is located is altered. Setting this parameter enables error messages to be reported or movement of the tuple between partitions. + + Value range: + + - **ENABLE**: Row movement is enabled. + - **DISABLE**: Row movement is disabled. + + The default value is **ENABLE**. + +- **ordinary\_table\_name** + + Specifies the name of the ordinary table whose data is to be migrated. + + Value range: an existing table name + +- **\{ WITH | WITHOUT \} VALIDATION** + + Checks whether the ordinary table data meets the specified partition key range of the partition to be migrated. + + Value range: + + - **WITH**: checks whether the ordinary table data meets the partition key range of the partition to be migrated. If any data does not meet the required range, an error is reported. + - **WITHOUT**: does not check whether the ordinary table data meets the partition key range of the partition to be migrated. + + The default value is **WITH**. + + The check is time consuming, especially when the data volume is large. Therefore, use **WITHOUT** when you are sure that the current ordinary table data meets the partition key range of the partition to be migrated. + +- **VERBOSE** + + When **VALIDATION** is **WITH**, if the ordinary table contains data that is out of the partition key range, insert the data to the correct partition. If there is no correct partition where the data can be inserted to, an error is reported. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >Only when **VALIDATION** is **WITH**, **VERBOSE** can be specified. + +- **partition\_new\_name** + + Specifies the new name of a partition. + + Value range: a string. It must comply with the naming convention. + + +## Example + +See [Examples](create-table-partition.md#en-us_topic_0237122119_en-us_topic_0059777586_s43dd49de892344bf89e6f56f17404842) in **CREATE TABLE PARTITION**. + +## Helpful Links + +[CREATE TABLE PARTITION](create-table-partition.md), and [DROP TABLE](drop-table.md) + diff --git a/content/en/docs/Developerguide/alter-table.md b/content/en/docs/Developerguide/alter-table.md new file mode 100644 index 000000000..0d12e3f86 --- /dev/null +++ b/content/en/docs/Developerguide/alter-table.md @@ -0,0 +1,431 @@ +# ALTER TABLE + +## Function + +**ALTER TABLE** modifies tables, including modifying table definitions, renaming tables, renaming specified columns in tables, renaming table constraints, setting table schemas, enabling or disabling row-level access control, and adding or updating multiple columns. + +## Precautions + +- Only the table owner or a system administrator has the permission to run the **ALTER TABLE** statement. +- The tablespace of a partitioned table cannot be modified, but the tablespace of the partition can be modified. +- The storage parameter **ORIENTATION** cannot be modified. +- Currently, **SET SCHEMA** can only set schemas to user schemas. It cannot set a schema to a system internal schema. +- Column-store tables support only **PARTIAL CLUSTER KEY** table-level constraints, but do not support primary and foreign key table-level constraints. 
+- In a column-store table, you can perform **ADD COLUMN**, **ALTER TYPE**, **SET STATISTICS**, **DROP COLUMN** operations, and change table name and space. The types of new and modified columns should be the [Data Types](data-types.md) supported by column-store. The **USING** option of **ALTER TYPE** only supports constant expression and expression involved in the column. +- The column constraints supported by column-store tables include **NULL**, **NOT NULL**, and **DEFAULT** constant values. Only the **DEFAULT** value can be modified \(by using **SET DEFAULT** and **DROP DEFAULT**\). Currently, **NULL** and **NOT NULL** constraints cannot be modified. + +- Auto-increment columns cannot be added, or a column whose **DEFAULT** value contains the **nextval\(\)** expression cannot be added. +- Row-access control cannot be enabled for foreign tables and temporary tables. +- When you delete a **PRIMARY KEY** constraint by constraint name, the **NOT NULL** constraint is not deleted. If necessary, manually delete the **NOT NULL** constraint. +- When JDBC is used, the **DEFAULT** value can be set through **PrepareStatement**. + +## Syntax + +- Modify the definition of a table. + + ``` + ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name ) } + action [, ... ]; + ``` + + There are several clauses of **action**: + + ``` + column_clause + | ADD table_constraint [ NOT VALID ] + | ADD table_constraint_using_index + | VALIDATE CONSTRAINT constraint_name + | DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] + | CLUSTER ON index_name + | SET WITHOUT CLUSTER + | SET ( {storage_parameter = value} [, ... ] ) + | RESET ( storage_parameter [, ... ] ) + | OWNER TO new_owner + | SET TABLESPACE new_tablespace + | SET {COMPRESS|NOCOMPRESS} + | TO { GROUP groupname | NODE ( nodename [, ... ] ) } + | ADD NODE ( nodename [, ... ] ) + | DELETE NODE ( nodename [, ... ] ) + | DISABLE TRIGGER [ trigger_name | ALL | USER ] + | ENABLE TRIGGER [ trigger_name | ALL | USER ] + | ENABLE REPLICA TRIGGER trigger_name + | ENABLE ALWAYS TRIGGER trigger_name + | DISABLE ROW LEVEL SECURITY + | ENABLE ROW LEVEL SECURITY + | FORCE ROW LEVEL SECURITY + | NO FORCE ROW LEVEL SECURITY + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- **ADD table\_constraint \[ NOT VALID \]** + > Adds a table constraint. + >- **ADD table\_constraint\_using\_index** + > Adds a primary key constraint or unique constraint to a table based on the existing unique index. + >- **VALIDATE CONSTRAINT constraint\_name** + > Validates a check-class constraint created with the **NOT VALID** option, and scans the entire table to ensure that all rows meet the constraint. Nothing happens if the constraint is already marked valid. + >- **DROP CONSTRAINT \[ IF EXISTS \] constraint\_name \[ RESTRICT | CASCADE \]** + > Drops a table constraint. + >- **CLUSTER ON index\_name** + > Selects the default index for future CLUSTER operations. Actually, the table is not re-clustered. + >- **SET WITHOUT CLUSTER** + > Deletes the most recently used **CLUSTER** index from the table. This affects future **CLUSTER** operations that do not specify an index. + >- **SET \( \{storage\_parameter = value\} \[, ... \] \)** + > Changes one or more storage parameters for the table. + >- **RESET \( storage\_parameter \[, ... \] \)** + > Resets one or more storage parameters to their defaults. As with **SET**, a table rewrite might be needed to update the table entirely. 
+ >- **OWNER TO new\_owner** + > Changes the owner of a table, sequence, or view to the specified user. + >- **SET TABLESPACE new\_tablespace** + > Changes the table's tablespace to the specified tablespace and moves the data files associated with the table to the new tablespace. Indexes on the table, if any, are not moved; but they can be moved separately with additional **SET TABLESPACE** option in **ALTER INDEX**. + >- **SET \{COMPRESS|NOCOMPRESS\}** + > Sets the compression feature of a table. The table compression feature affects only the storage mode of data inserted in a batch subsequently and does not affect storage of existing data. Setting the table compression feature will result in the fact that there are both compressed and uncompressed data in the table. + >- **TO \{ GROUP groupname | NODE \( nodename \[, ... \] \) \}** + > The syntax is only available in extended mode \(when GUC parameter **support\_extended\_features** is **on**\). Exercise caution when enabling the mode. It is mainly used for tools like internal scale-out tools. Common users should not use the mode. + >- **ADD NODE \( nodename \[, ... \] \)** + > It is only available for internal scale-out tools. Common users should not use the syntax. + >- **DELETE NODE \( nodename \[, ... \] \)** + > It is only available for internal scale-in tools. Common users should not use the syntax. + >- **DISABLE TRIGGER \[ trigger\_name | ALL | USER \]** + > Disables a single trigger specified by **trigger\_name**, disables all triggers, or disables only user triggers \(excluding internally generated constraint triggers, for example, deferrable unique constraint triggers and exclusion constraint triggers\). + >![](public_sys-resources/icon-note.gif) **NOTE:** + >Exercise caution when using this function because data integrity cannot be ensured as expected if the triggers are not executed. + + - **| ENABLE TRIGGER \[ trigger\_name | ALL | USER \]** + + Enables a single trigger specified by **trigger\_name**, enables all triggers, or enables only user triggers. + + - **| ENABLE REPLICA TRIGGER trigger\_name** + + Determines that the trigger firing mechanism is affected by the configuration variable [session\_replication\_role](statement-behavior.md#en-us_topic_0237124732_en-us_topic_0059779117_sffbd1c48d86b4c3fa3287167a7810216). When the replication role is **origin** \(default value\) or **local**, a simple trigger is fired. + + When **ENABLE REPLICA** is configured for a trigger, it is fired only when the session is in replica mode. + + - **| ENABLE ALWAYS TRIGGER trigger\_name** + + Determines that all triggers are fired regardless of the current replication mode. + + - **| DISABLE/ENABLE ROW LEVEL SECURITY** + + Enables or disables row-level access control for a table. + + If row-level access control is enabled for a data table but no row-level access control policy is defined, the row-level access to the data table is not affected. If row-level access control for a table is disabled, the row-level access to the table is not affected even if a row-level access control policy has been defined. For details, see [CREATE ROW LEVEL SECURITY POLICY](create-row-level-security-policy.md). + + - **| NO FORCE/FORCE ROW LEVEL SECURITY** + + Forcibly enables or disables row-level access control for a table. + + By default, the table owner is not affected by the row-level access control feature. However, if row-level access control is forcibly enabled, the table owner \(excluding system administrators\) wil be affected. 
System administrators are not affected by any row-level access control policies. + + + + - There are several clauses of **column\_clause**: + + ``` + ADD [ COLUMN ] column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] + | MODIFY column_name data_type + | MODIFY column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] + | MODIFY column_name [ CONSTRAINT constraint_name ] NULL + | DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ] + | ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ] + | ALTER [ COLUMN ] column_name { SET DEFAULT expression | DROP DEFAULT } + | ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL + | ALTER [ COLUMN ] column_name SET STATISTICS [PERCENT] integer + | ADD STATISTICS (( column_1_name, column_2_name [, ...] )) + | DELETE STATISTICS (( column_1_name, column_2_name [, ...] )) + | ALTER [ COLUMN ] column_name SET ( {attribute_option = value} [, ... ] ) + | ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) + | ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- **ADD \[ COLUMN \] column\_name data\_type \[ compress\_mode \] \[ COLLATE collation \] \[ column\_constraint \[ ... \] \]** + > Adds a column to a table. If a column is added with **ADD COLUMN**, all existing rows in the table are initialized with the column's default value \(**NULL** if no **DEFAULT** clause is specified\). + >- **ADD \( \{ column\_name data\_type \[ compress\_mode \] \} \[, ...\] \)** + > Adds columns in the table. + >- **MODIFY \( \{ column\_name data\_type | column\_name \[ CONSTRAINT constraint\_name \] NOT NULL \[ ENABLE \] | column\_name \[ CONSTRAINT constraint\_name \] NULL \} \[, ...\] \)** + > Modifies the data type of an existing column in the table. + >- **DROP \[ COLUMN \] \[ IF EXISTS \] column\_name \[ RESTRICT | CASCADE \]** + > Drops a column from a table. Indexes and constraints related to the column are automatically dropped. If an object not belonging to the table depends on the column, **CASCADE** must be specified, such as a view. + > The **DROP COLUMN** statement does not physically remove the column, but simply makes it invisible to SQL operations. Subsequent **INSERT** and **UPDATE** operations in the table will store a **NULL** value for the column. Therefore, column deletion takes a short period of time but does not immediately release the tablespace on the disks, because the space occupied by the deleted column is not reclaimed. The space will be reclaimed when **VACUUM** is executed. + >- **ALTER \[ COLUMN \] column\_name \[ SET DATA \] TYPE data\_type \[ COLLATE collation \] \[ USING expression \]** + > Modifies the type of a column in a table. Indexes and simple table constraints on the column will automatically use the new data type by reparsing the originally supplied expression. + > **ALTER TYPE** requires an entire table be rewritten. This is an advantage sometimes, because it frees up unnecessary space from a table. For example, to reclaim the space occupied by a deleted column, the fastest method is to use the following statement. + > ``` + > ALTER TABLE table ALTER COLUMN anycol TYPE anytype; + > ``` + > In this statement, **anycol** indicates any column existing in the table and **anytype** indicates the type of the prototype of the column. **ALTER TYPE** does not change the table except that the table is forcibly rewritten. 
In this way, the data that is no longer used is deleted. + >- **ALTER \[ COLUMN \] column\_name \{ SET DEFAULT expression | DROP DEFAULT \}** + > Sets or removes the default value for a column. The default values only apply to subsequent **INSERT** operations; they do not cause rows already in the table to change. Defaults can also be created for views, in which case they are inserted into **INSERT** statements on the view before the view's **ON INSERT** rule is applied. + >- **ALTER \[ COLUMN \] column\_name \{ SET | DROP \} NOT NULL** + > Changes whether a column is marked to allow null values or to reject null values. You can only use **SET NOT NULL** when the column contains no null values. + >- **ALTER \[ COLUMN \] column\_name SET STATISTICS \[PERCENT\] integer** + > Specifies the per-column statistics-gathering target for subsequent **ANALYZE** operations. The target can be set in the range from 0 to 10000. Set it to **-1** to revert to using the default system statistics target. + >- **\{ADD | DELETE\} STATISTICS \(\(column\_1\_name, column\_2\_name \[, ...\]\)\)** + > Adds or deletes the declaration of collecting multi-column statistics to collect multi-column statistics as needed when **ANALYZE** is performed for a table or a database. The statistics about a maximum of 32 columns can be collected at a time. You are not allowed to add or delete such declaration for system catalogs or foreign tables. + >- **ALTER \[ COLUMN \] column\_name SET \( \{attribute\_option = value\} \[, ... \] \)** + > **ALTER \[ COLUMN \] column\_name RESET \( attribute\_option \[, ... \] \)** + > Sets or resets per-attribute options. + > Currently, the only defined per-attribute options are **n\_distinct** and **n\_distinct\_inherited**. **n\_distinct** affects statistics of a table, while **n\_distinct\_inherited** affects the statistics of the table and its subtables. Currently, only **SET/RESET n\_distinct** is supported, and **SET/RESET n\_distinct\_inherited** is forbidden. + >- **ALTER \[ COLUMN \] column\_name SET STORAGE \{ PLAIN | EXTERNAL | EXTENDED | MAIN \}** + > Sets the storage mode for a column. This clause specifies whether this column is held inline or in a secondary TOAST table, and whether the data should be compressed. It is set only for row-store tables and is invalid for column-store tables. If it is set for column-store tables, an error will be displayed when the statement is executed. **SET STORAGE** itself does not change anything in the table. It sets the strategy to be pursued during future table updates. + + - **column\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { NOT NULL | + NULL | + CHECK ( expression ) | + DEFAULT default_expr | + UNIQUE index_parameters | + PRIMARY KEY index_parameters } + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + - **compress\_mode** of a column is as follows: + + ``` + [ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS ] + ``` + + + - **table\_constraint\_using\_index** used to add the primary key constraint or unique constraint based on the unique index is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { UNIQUE | PRIMARY KEY } USING INDEX index_name + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + - **table\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { CHECK ( expression ) | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... 
] ) index_parameters | + PARTIAL CLUSTER KEY ( column_name [, ... ] } + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + **index\_parameters** is as follows: + + ``` + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ USING INDEX TABLESPACE tablespace_name ] + ``` + + + +- Rename a table. The renaming does not affect stored data. + + ``` + ALTER TABLE [ IF EXISTS ] table_name + RENAME TO new_table_name; + ``` + +- Rename the specified column in the table. + + ``` + ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )} + RENAME [ COLUMN ] column_name TO new_column_name; + ``` + +- Rename the constraint of the table. + + ``` + ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name ) } + RENAME CONSTRAINT constraint_name TO new_constraint_name; + ``` + +- Set the schema of the table. + + ``` + ALTER TABLE [ IF EXISTS ] table_name + SET SCHEMA new_schema; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The schema setting moves the table into another schema. Associated indexes and constraints owned by table columns are migrated as well. Currently, the schema for sequences cannot be changed. If the table has sequences, delete the sequences, and create them again or delete the ownership between the table and sequences. In this way, the table schema can be changed. + >- To change the schema of a table, you must also have the **CREATE** permission on the new schema. To add the table as a new child of a parent table, you must own the parent table as well. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have the **CREATE** permission on the table's schema. These restrictions enforce that the user can only recreate and delete the table. However, a system administrator can alter the ownership of any table anyway. + >- All the actions except for **RENAME** and **SET SCHEMA** can be combined into a list of multiple alterations to apply in parallel. For example, it is possible to add several columns or alter the type of several columns in a single statement. This is useful with large tables, since only one pass over the tables needs to be made. + >- Adding a **CHECK** or **NOT NULL** constraint will scan the table to validate that existing rows meet the constraint. + >- Adding a column with a non-**NULL** default or changing the type of an existing column will rewrite the entire table. Rewriting a large table may take much time and temporarily needs doubled disk space. + +- Add columns. + + ``` + ALTER TABLE [ IF EXISTS ] table_name + ADD ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]} [, ...] ); + ``` + +- Update columns. + + ``` + ALTER TABLE [ IF EXISTS ] table_name + MODIFY ( { column_name data_type | column_name [ CONSTRAINT constraint_name ] NOT NULL [ ENABLE ] | column_name [ CONSTRAINT constraint_name ] NULL } [, ...] ); + ``` + + +## Parameter Description + +- **IF EXISTS** + + Sends a notice instead of an error if no tables have identical names. The notice prompts that the table you are querying does not exist. + +- **table\_name \[\*\] | ONLY table\_name | ONLY \( table\_name \)** + + **table\_name** is the name of the table that you need to modify. + + If **ONLY** is specified, only the table is modified. If **ONLY** is not specified, the table and all subtables will be modified. 
You can add the asterisk \(\*\) option following the table name to specify that all subtables are scanned, which is the default operation. + +- **constraint\_name** + + Specifies the name of an existing constraint to drop. + +- **index\_name** + + Specifies the name of this index. + +- **storage\_parameter** + + Specifies the name of a storage parameter. + +- **new\_owner** + + Specifies the name of the new table owner. + +- **new\_tablespace** + + Specifies the new name of the tablespace to which the table belongs. + +- **column\_name**, **column\_1\_name**, **column\_2\_name** + + Specifies the name of a new or existing column. + +- **data\_type** + + Specifies the type of a new column or a new type of an existing column. + +- **compress\_mode** + + Specifies the compression option of the table, which is only available for row-store tables. The clause specifies the compression algorithm preferentially used by the column. + +- **collation** + + Specifies the collation rule name of a column. The optional **COLLATE** clause specifies a collation for the new column; if omitted, the collation is the default for the new column. + +- **USING expression** + + Specifies how to compute the new column value from the old; if omitted, the default conversion is an assignment cast from old data type to new. A **USING** clause must be provided if there is no implicit or assignment cast from the old to new type. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**USING** in **ALTER TYPE** can specify any expression involving the old values of the row; that is, it can refer to any columns other than the one being cast. This allows general casting to be done with the **ALTER TYPE** syntax. Because of this flexibility, the **USING** expression is not applied to the column's default value \(if any\); the result might not be a constant expression as required for a default. This means that when there is no implicit or assignment cast from old to new type, **ALTER TYPE** might fail to convert the default even though a **USING** clause is supplied. In such cases, drop the default with **DROP DEFAULT**, perform the **ALTER TYPE**, and then use **SET DEFAULT** to add a suitable new default. Similar considerations apply to indexes and constraints involving the column. + +- **NOT NULL | NULL** + + Sets whether the column allows null values. + +- **integer** + + Specifies the constant value of a signed integer. When using **PERCENT**, the range of **integer** is from 0 to 100. + +- **attribute\_option** + + Specifies an attribute option. + +- **PLAIN | EXTERNAL | EXTENDED | MAIN** + + Specifies a column storage mode. + + - **PLAIN** must be used for fixed-length values \(such as integers\). It must be inline and uncompressed. + - **MAIN** is for inline, compressible data. + - **EXTERNAL** is for external, uncompressed data. Use of **EXTERNAL** will make substring operations on **text** and **bytea** values run faster, at the penalty of increased storage space. + - **EXTENDED** is for external, compressed data. **EXTENDED** is the default for most data types that support non-**PLAIN** storage. + +- **CHECK \( expression \)** + + New rows or rows to be updated must satisfy for an expression to be true. If any row produces a false result, an error is raised and the database is not modified. + + A check constraint specified as a column constraint should reference only the column's values, while an expression appearing in a table constraint can reference multiple columns. 
+ + Currently, **CHECK \( expression \)** does not include subqueries and cannot use variables apart from the current column. + +- **DEFAULT default\_expr** + + Assigns a default data value for a column. + + The data type of the default expression must match the data type of the column. + + The default expression will be used in any insert operation that does not specify a value for the column. If there is no default value for a column, then the default value is null. + +- **UNIQUE index\_parameters** + + **UNIQUE \( column\_name \[, ... \] \) index\_parameters** + + Specifies that a group of one or more columns of a table can contain only unique values. + +- **PRIMARY KEY index\_parameters** + + **PRIMARY KEY \( column\_name \[, ... \] \) index\_parameters** + + The primary key constraint specifies that a column or columns of a table can contain only unique \(non-duplicate\) and non-null values. + +- **DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE** + + Sets whether the constraint can be deferrable. + + - **DEFERRABLE**: deferrable to the end of the transaction and checks using **SET CONSTRAINTS**. + - **NOT DEFERRABLE**: checks immediately after the execution of each command. + - **INITIALLY IMMEDIATE**: checks immediately after the execution of each statement. + - **INITIALLY DEFERRED**: checks when the transaction ends. + +- **WITH \( \{storage\_parameter = value\} \[, ... \] \)** + + Specifies an optional storage parameter for a table or an index. + +- **tablespace\_name** + + Specifies the name of the tablespace where the index locates. + +- **COMPRESS|NOCOMPRESS** + - **NOCOMPRESS**: If the **NOCOMPRESS** keyword is specified, the existing compression feature of the table will not be changed. + - **COMPRESS**: If the **COMPRESS** keyword is specified, the table compression feature will be triggered by batch tuple insertion. + +- **new\_table\_name** + + Specifies the new table name. + +- **new\_column\_name** + + Specifies the new name of a specific column in a table. + +- **new\_constraint\_name** + + Specifies the new name of a table constraint. + +- **new\_schema** + + Specifies the new schema name. + +- **CASCADE** + + Automatically drops objects that depend on the dropped column or constraint \(for example, views referencing the column\). + +- **RESTRICT** + + Refuses to drop the column or constraint if there are any dependent objects. This is the default behavior. + +- **schema\_name** + + Specifies the schema name of a table. + + +## Examples + +See [Example:](create-table.md#en-us_topic_0237122117_en-us_topic_0059778169_s86758dcf05d442d2a9ebd272e76ed1b8) in **CREATE TABLE**. + +## Helpful Links + +[CREATE TABLE](create-table.md) and [DROP TABLE](drop-table.md) + diff --git a/content/en/docs/Developerguide/alter-tablespace.md b/content/en/docs/Developerguide/alter-tablespace.md new file mode 100644 index 000000000..17160e311 --- /dev/null +++ b/content/en/docs/Developerguide/alter-tablespace.md @@ -0,0 +1,119 @@ +# ALTER TABLESPACE + +## Function + +**ALTER TABLESPACE** modifies the attributes of a tablespace. + +## Precautions + +- Only the owner of a tablespace or a system administrator can execute the **ALTER TABLESPACE** statement. +- To change the owner, you must also be a direct or indirect member of the new owning role. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If **new\_owner** is the same as **old\_owner**, the current user will not be verified. A message indicating successful **ALTER** execution is displayed. 
+ + +## Syntax + +- The syntax of renaming a tablespace is as follows: + + ``` + ALTER TABLESPACE tablespace_name + RENAME TO new_tablespace_name; + ``` + +- The syntax of setting the owner of a tablespace is as follows: + + ``` + ALTER TABLESPACE tablespace_name + OWNER TO new_owner; + ``` + +- The syntax of setting the attributes of a tablespace is as follows: + + ``` + ALTER TABLESPACE tablespace_name + SET ( {tablespace_option = value} [, ... ] ); + ``` + +- The syntax of resetting the attributes of a tablespace is as follows: + + ``` + ALTER TABLESPACE tablespace_name + RESET ( { tablespace_option } [, ...] ); + ``` + +- The syntax of setting the quota of a tablespace is as follows: + + ``` + ALTER TABLESPACE tablespace_name + RESIZE MAXSIZE { UNLIMITED | 'space_size'}; + ``` + + +## Parameter Description + +- **tablespace\_name** + + Specifies the tablespace to be modified. + + Value range: an existing table name + +- **new\_tablespace\_name** + + Specifies the new name of a tablespace. + + The new name cannot start with **PG\_**. + + Value range: a string. It must comply with the naming convention rule. + +- **new\_owner** + + Specifies the new owner of the tablespace. + + Value range: an existing username + +- **tablespace\_option** + + Sets or resets the parameters of a tablespace. + + Value range: + + - seq\_page\_cost: sets the optimizer to calculate the cost of obtaining disk pages in sequence. The default value is **1.0**. + - **random\_page\_cost**: sets the optimizer to calculate the cost of obtaining disk pages in a non-sequential manner. The default value is **4.0**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The value of **random\_page\_cost** is relative to that of **seq\_page\_cost**. It is meaningless when the value is equal to or less than the value of **seq\_page\_cost**. + >- The prerequisite for the default value **4.0** is that the optimizer uses indexes to scan table data and the hit ratio of table data in the cache is about 90%. + >- If the size of the table data space is smaller than that of the physical memory, decrease the value to a proper level. On the contrary, if the hit ratio of table data in the cache is lower than 90%, increase the value. + >- If random-access memory like SSD is adopted, the value can be decreased to a certain degree to reflect the cost of true random scan. + + + Value range: a positive floating point number + +- **RESIZE MAXSIZE** + + Resets the maximum size of tablespace. + + Value range: + + - **UNLIMITED**: No limit is set for this tablespace. + - Determined by **space\_size**. For details about the format, see [CREATE TABLESPACE](create-tablespace.md). + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >You can also use the following statement to change the value of **MAXSIZE**: + >``` + >ALTER TABLESPACE tablespace_name RESIZE MAXSIZE + > { 'UNLIMITED' | 'space_size'}; + >``` + + + +## Examples + +See [Examples](create-tablespace.md#en-us_topic_0237122120_en-us_topic_0059777670_s4e5e97caa377440d87fad0d49b56323e) in **CREATE TABLESPACE**. 
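+
+In addition, a minimal sketch follows; it assumes a tablespace named tbs_data01 already exists and is owned by the current user (the tablespace name is illustrative only).
+
+```
+-- Rename the tablespace (assumes tbs_data01 exists).
+postgres=# ALTER TABLESPACE tbs_data01 RENAME TO tbs_data02;
+
+-- Adjust an optimizer cost parameter of the tablespace.
+postgres=# ALTER TABLESPACE tbs_data02 SET (random_page_cost = 2.0);
+
+-- Remove the quota limit of the tablespace.
+postgres=# ALTER TABLESPACE tbs_data02 RESIZE MAXSIZE UNLIMITED;
+```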
+ +## Helpful Links + +[CREATE TABLESPACE](create-tablespace.md) and [DROP TABLESPACE](drop-tablespace.md) + diff --git a/content/en/docs/Developerguide/alter-text-search-configuration.md b/content/en/docs/Developerguide/alter-text-search-configuration.md new file mode 100644 index 000000000..c07e5438a --- /dev/null +++ b/content/en/docs/Developerguide/alter-text-search-configuration.md @@ -0,0 +1,175 @@ +# ALTER TEXT SEARCH CONFIGURATION + +## Function + +**ALTER TEXT SEARCH CONFIGURATION** modifies the definition of a text search configuration. You can modify its mappings from token types to dictionaries, change the configuration's name or owner, or modify the parameters. + +The **ADD MAPPING FOR** form installs a list of dictionaries to be consulted for the specified token types; an error will be generated if there is already a mapping for any of the token types. + +The **ALTER MAPPING FOR** form removes existing mapping for those token types and then adds specified mappings. + +**ALTER MAPPING REPLACE ... ** **WITH ... ** and **ALTER MAPPING FOR...** **REPLACE ... ** **WITH ...** options replace **old\_dictionary** with **new\_dictionary**. Note that only when **pg\_ts\_config\_map** has tuples corresponding to **maptokentype** and **old\_dictionary**, the update will succeed. If the update fails, no messages are returned. + +The **DROP MAPPING FOR** form deletes all dictionaries for the specified token types in the text search configuration. If **IF EXISTS** is not specified and the string type mapping specified by **DROP MAPPING FOR** does not exist in text search configuration, an error will occur in the database. + +## Precautions + +- If a search configuration is referenced \(to create indexes\), users are not allowed to modify the text search configuration. +- To use **ALTER TEXT SEARCH CONFIGURATION**, you must be the owner of the configuration. + +## Syntax + +- Add text search configuration string mapping. + +``` +ALTER TEXT SEARCH CONFIGURATION name + ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]; +``` + +- Modify the text search configuration dictionary syntax. + +``` +ALTER TEXT SEARCH CONFIGURATION name + ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary; +``` + +- Modify the text search configuration string. + +``` +ALTER TEXT SEARCH CONFIGURATION name + ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]; +``` + +- Change the text search configuration dictionary. + +``` +ALTER TEXT SEARCH CONFIGURATION name + ALTER MAPPING REPLACE old_dictionary WITH new_dictionary; +``` + +- Remove text search configuration string mapping. + +``` +ALTER TEXT SEARCH CONFIGURATION name + DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ]; +``` + +- Rename the owner of text search configuration. + +``` +ALTER TEXT SEARCH CONFIGURATION name OWNER TO new_owner; +``` + +- Rename the text search configuration. + +``` +ALTER TEXT SEARCH CONFIGURATION name RENAME TO new_name; +``` + +- Rename the namespace of text search configuration. + +``` +ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA new_schema; +``` + +- Modify the attributes of the text search configuration. + +``` +ALTER TEXT SEARCH CONFIGURATION name SET ( { configuration_option = value } [, ...] ); +``` + +- Reset the attributes of text search configuration. + +``` +ALTER TEXT SEARCH CONFIGURATION name RESET ( {configuration_option} [, ...] 
); +``` + +## Parameter Description + +- **name** + + Specifies the name \(optionally schema-qualified\) of an existing text search configuration. + +- **token\_type** + + Specifies the name of a token type that is emitted by the configuration's parser. For details, see [Parser](parser.md). + +- **dictionary\_name** + + Specifies the name of a text search dictionary. If multiple dictionaries are listed, they are searched in the specified order. + +- **old\_dictionary** + + Specifies the name of a text search dictionary to be replaced in the mapping. + +- **new\_dictionary** + + Specifies the name of a text search dictionary to be substituted for **old\_dictionary**. + +- **new\_owner** + + Specifies the new owner of the text search configuration. + +- **new\_name** + + Specifies the new name of the text search configuration. + +- **new\_schema** + + Specifies the new schema for the text search configuration. + +- **configuration\_option** + + Specifies the text search configuration option. For details, see [CREATE TEXT SEARCH CONFIGURATION](create-text-search-configuration.md). + +- **value** + + Specifies the value of text search configuration option. + + +## Examples + +``` +-- Create a text search configuration. +postgres=# CREATE TEXT SEARCH CONFIGURATION english_1 (parser=default); +CREATE TEXT SEARCH CONFIGURATION + +-- Add text search configuration string mapping. +postgres=# ALTER TEXT SEARCH CONFIGURATION english_1 ADD MAPPING FOR word WITH simple,english_stem; +ALTER TEXT SEARCH CONFIGURATION + +-- Add text search configuration string mapping. +postgres=# ALTER TEXT SEARCH CONFIGURATION english_1 ADD MAPPING FOR email WITH english_stem, french_stem; +ALTER TEXT SEARCH CONFIGURATION + +-- Query information about the text search configuration. +postgres=# SELECT b.cfgname,a.maptokentype,a.mapseqno,a.mapdict,c.dictname FROM pg_ts_config_map a,pg_ts_config b, pg_ts_dict c WHERE a.mapcfg=b.oid AND a.mapdict=c.oid AND b.cfgname='english_1' ORDER BY 1,2,3,4,5; + cfgname | maptokentype | mapseqno | mapdict | dictname +-----------+--------------+----------+---------+-------------- + english_1 | 2 | 1 | 3765 | simple + english_1 | 2 | 2 | 12960 | english_stem + english_1 | 4 | 1 | 12960 | english_stem + english_1 | 4 | 2 | 12964 | french_stem +(4 rows) + +-- Add text search configuration string mapping. +postgres=# ALTER TEXT SEARCH CONFIGURATION english_1 ALTER MAPPING REPLACE french_stem with german_stem; +ALTER TEXT SEARCH CONFIGURATION + +-- Query information about the text search configuration. +postgres=# SELECT b.cfgname,a.maptokentype,a.mapseqno,a.mapdict,c.dictname FROM pg_ts_config_map a,pg_ts_config b, pg_ts_dict c WHERE a.mapcfg=b.oid AND a.mapdict=c.oid AND b.cfgname='english_1' ORDER BY 1,2,3,4,5; + cfgname | maptokentype | mapseqno | mapdict | dictname +-----------+--------------+----------+---------+-------------- + english_1 | 2 | 1 | 3765 | simple + english_1 | 2 | 2 | 12960 | english_stem + english_1 | 4 | 1 | 12960 | english_stem + english_1 | 4 | 2 | 12966 | german_stem +(4 rows) +``` + +See [Examples](create-text-search-configuration.md#en-us_topic_0237122121_en-us_topic_0059777835_sc3a4aef5c0c0420eaf5a2e67097004a2) in **CREATE TEXT SEARCH CONFIGURATION**. 
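+
+The following sketch removes one of the mappings added above; it assumes the english_1 configuration from the preceding example still exists.
+
+```
+-- Remove the string mapping for the email token type. IF EXISTS suppresses the error if the mapping is absent.
+postgres=# ALTER TEXT SEARCH CONFIGURATION english_1 DROP MAPPING IF EXISTS FOR email;
+ALTER TEXT SEARCH CONFIGURATION
+```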
+ +## Helpful Links + +[CREATE TEXT SEARCH CONFIGURATION](create-text-search-configuration.md) and [DROP TEXT SEARCH CONFIGURATION](drop-text-search-configuration.md) + diff --git a/content/en/docs/Developerguide/alter-text-search-dictionary.md b/content/en/docs/Developerguide/alter-text-search-dictionary.md new file mode 100644 index 000000000..9a2ca627c --- /dev/null +++ b/content/en/docs/Developerguide/alter-text-search-dictionary.md @@ -0,0 +1,101 @@ +# ALTER TEXT SEARCH DICTIONARY + +## Function + +**ALTER TEXT SEARCH DICTIONARY** modifies the definition of a full-text search dictionary, including its parameters, name, owner, and schema. + +## Precautions + +- Predefined dictionaries do not support the **ALTER** operations. +- Only the owner of a dictionary or a system administrator can perform the **ALTER** operations. +- After a dictionary is created or modified, any modification to the customized dictionary definition file in the **filepath** directory does not affect the dictionary in the database. To use these modifications in the database, run the **ALTER TEXT SEARCH DICTIONARY** statement to update the definition file of the corresponding dictionary. + +## Syntax + +- Modify the dictionary definition. + + ``` + ALTER TEXT SEARCH DICTIONARY name ( + option [ = value ] [, ... ] + ); + ``` + + +- Rename a dictionary. + + ``` + ALTER TEXT SEARCH DICTIONARY name RENAME TO new_name; + ``` + +- Set the schema of the dictionary. + + ``` + ALTER TEXT SEARCH DICTIONARY name SET SCHEMA new_schema; + ``` + +- Change the owner of the dictionary. + + ``` + ALTER TEXT SEARCH DICTIONARY name OWNER TO new_owner; + ``` + + +## Parameter Description + +- **name** + + Specifies the name of an existing dictionary. \(If you do not specify a schema name, the dictionary in the current schema will be used.\) + + Value range: an existing dictionary name + +- **option** + + Specifies the parameter name to be modified. Each type of dictionaries has a template containing their custom parameters. Parameters function in a way irrelevant to their setting sequence. For details about the parameters, see [option](create-text-search-dictionary.md#en-us_topic_0237122122_li1286812455448). + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The value of **TEMPLATE** in the dictionary cannot be changed. + >- To specify a dictionary, specify both the dictionary definition file path \(**FILEPATH**\) and the file name. + >- The name of a dictionary definition file can contain only lowercase letters, digits, and underscores \(\_\). + +- **value** + + Specifies the new value of a parameter. If the equal sign \(=\) and _value_ are omitted, the previous settings of the option are deleted and the default value is used. + + Value range: valid values defined by **option**. + +- **new\_name** + + Specifies the new name of a dictionary. + + Value range: a string, which complies with the identifier naming convention. A value can contain a maximum of 63 characters. + +- **new\_owner** + + Specifies the new owner of a dictionary. + + Value range: an existing username + +- **new\_schema** + + Specifies the new schema of a dictionary. + + Value range: an existing schema + + +## Examples + +``` +-- Modify the definition of stop words in Snowball dictionaries. Retain the values of other parameters. +postgres=# ALTER TEXT SEARCH DICTIONARY my_dict ( StopWords = newrussian, FilePath = 'file:///home/dicts' ); + +-- Modify the Language parameter in Snowball dictionaries and delete the definition of stop words. 
+postgres=# ALTER TEXT SEARCH DICTIONARY my_dict (Language = dutch, StopWords); + +-- Update the dictionary definition and do not change any other content. +postgres=# ALTER TEXT SEARCH DICTIONARY my_dict ( dummy ); +``` + +## Helpful Links + +[CREATE TEXT SEARCH DICTIONARY](create-text-search-dictionary.md) and [DROP TEXT SEARCH DICTIONARY](drop-text-search-dictionary.md) + diff --git a/content/en/docs/Developerguide/alter-trigger.md b/content/en/docs/Developerguide/alter-trigger.md new file mode 100644 index 000000000..e929c09d8 --- /dev/null +++ b/content/en/docs/Developerguide/alter-trigger.md @@ -0,0 +1,45 @@ +# ALTER TRIGGER + +## Function + +**ALTER TRIGGER** modifies the definition of a trigger. + +## Precautions + +Only the owner of a table where the trigger is created and a system administrator can run the **ALTER TRIGGER** statement. + +## Syntax + +``` +ALTER TRIGGER trigger_name ON table_name RENAME TO new_name; +``` + +## Parameter Description + +- **trigger\_name** + + Specifies the name of the trigger to be modified. + + Value range: an existing trigger + +- **table\_name** + + Specifies the name of the table where the trigger to be modified is located. + + Value range: an existing table having a trigger + +- **new\_name** + + Specifies the new name after modification. + + Value range: a string, which complies with the identifier naming convention. A value contains a maximum of 63 characters and cannot be the same as other triggers on the same table. + + +## Examples + +See examples in [CREATE TRIGGER](create-trigger.md). + +## Helpful Links + +[CREATE TRIGGER](create-trigger.md), [DROP TRIGGER](drop-trigger.md), and [ALTER TABLE](alter-table.md) + diff --git a/content/en/docs/Developerguide/alter-type.md b/content/en/docs/Developerguide/alter-type.md new file mode 100644 index 000000000..f8e01cde6 --- /dev/null +++ b/content/en/docs/Developerguide/alter-type.md @@ -0,0 +1,152 @@ +# ALTER TYPE + +## Function + +**ALTER TYPE** modifies the definition of a type. + +## Syntax + +- Modify a type. + + ``` + ALTER TYPE name action [, ... ] + ALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + ALTER TYPE name RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ] + ALTER TYPE name RENAME TO new_name + ALTER TYPE name SET SCHEMA new_schema + ALTER TYPE name ADD VALUE [ IF NOT EXISTS ] new_enum_value [ { BEFORE | AFTER } neighbor_enum_value ] + ALTER TYPE name RENAME VALUE existing_enum_value TO new_enum_value + + where action is one of: + ADD ATTRIBUTE attribute_name data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] + DROP ATTRIBUTE [ IF EXISTS ] attribute_name [ CASCADE | RESTRICT ] + ALTER ATTRIBUTE attribute_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] + ``` + +- Add a new attribute to a composite type. + + ``` + ALTER TYPE name ADD ATTRIBUTE attribute_name data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] + ``` + + +- Delete an attribute from a composite type. + + ``` + ALTER TYPE name DROP ATTRIBUTE [ IF EXISTS ] attribute_name [ CASCADE | RESTRICT ] + ``` + + +- Change the type of an attribute in a composite type. + + ``` + ALTER TYPE name ALTER ATTRIBUTE attribute_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] + ``` + + +- Change the owner of a type. + + ``` + ALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + ``` + + +- Change the name of a type or the name of an attribute in a composite type. 
+ + ``` + ALTER TYPE name RENAME TO new_name + ALTER TYPE name RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ] + ``` + + +- Move a type to a new schema. + + ``` + ALTER TYPE name SET SCHEMA new_schema + ``` + + +- Add a new value to an enumerated type. + + ``` + ALTER TYPE name ADD VALUE [ IF NOT EXISTS ] new_enum_value [ { BEFORE | AFTER } neighbor_enum_value ] + ``` + + +- Change an enumerated value in the value list. + + ``` + ALTER TYPE name RENAME VALUE existing_enum_value TO new_enum_value + ``` + + +## Parameter Description + +- **name** + + Specifies the name of an existing type that needs to be modified \(optionally schema-qualified\). + + +- **new\_name** + + Specifies the new name of the type. + + +- **new\_owner** + + Specifies the new owner of the type. + + +- **new\_schema** + + Specifies the new schema of the type. + + +- **attribute\_name** + + Specifies the name of the attribute to be added, modified, or deleted. + + +- **new\_attribute\_name** + + Specifies the new name of the attribute to be renamed. + + +- **data\_type** + + Specifies the data type of the attribute to be added, or the new type of the attribute to be modified. + +- **new\_enum\_value** + + Specifies a new enumerated value. It is a non-null string with a maximum length of 64 bytes. + +- **neighbor\_enum\_value** + + Specifies an existing enumerated value before or after which a new enumerated value will be added. + +- **existing\_enum\_value** + + Specifies an enumerated value to be changed. It is a non-null string with a maximum length of 64 bytes. + + +- **CASCADE** + + Determines that the type to be modified, its associated records, and subtables that inherit the type will all be updated. + +- **RESTRICT** + + Refuses to update the associated records of the modified type. This is the default action. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- **ADD ATTRIBUTE**, **DROP ATTRIBUTE**, and **ALTER ATTRIBUTE** can be combined for processing. For example, it is possible to add several attributes or change the types of several attributes at the same time in one command. + >- To use **ALTER TYPE**, you must be the owner of the type. To modify a schema of a type, you must also have the **CREATE** permission on the new schema. To change the owner, you must be a direct or indirect member of the new owning role, and the member must have the **CREATE** permission on the schema of this type. \(These restrictions enforce that the user can only recreate and delete the type. However, the system administrator can change ownership of any type in any way.\) To add an attribute or modify the type of an attribute, you must also have the **USAGE** permission of this type. + + +## Example + +See [Examples](create-type.md#en-us_topic_0237122124_en-us_topic_0059779377_s66a0b4a6a1df4ba4a116c6c565a0fe9d) in **CREATE TYPE**. + +## Helpful Links + +[CREATE TYPE](create-type.md) and [DROP TYPE](drop-type.md) + diff --git a/content/en/docs/Developerguide/alter-user.md b/content/en/docs/Developerguide/alter-user.md new file mode 100644 index 000000000..7c45c25ac --- /dev/null +++ b/content/en/docs/Developerguide/alter-user.md @@ -0,0 +1,111 @@ +# ALTER USER + +## Function + +**ALTER USER** modifies the attributes of a database user. + +## Precautions + +Session parameters modified by **ALTER USER** apply to a specified user and take effect in the next session. + +## Syntax + +- Modify user permissions or other information. + + ``` + ALTER USER user_name [ [ WITH ] option [ ... 
] ]; + ``` + + The **option** clause is as follows: + + ``` + { CREATEDB | NOCREATEDB } + | { CREATEROLE | NOCREATEROLE } + | { INHERIT | NOINHERIT } + | { AUDITADMIN | NOAUDITADMIN } + | { SYSADMIN | NOSYSADMIN } + | {MONADMIN | NOMONADMIN} + | {OPRADMIN | NOOPRADMIN} + | {POLADMIN | NOPOLADMIN} + | { USEFT | NOUSEFT } + | { LOGIN | NOLOGIN } + | { REPLICATION | NOREPLICATION } + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | CONNECTION LIMIT connlimit + | [ ENCRYPTED | UNENCRYPTED ] PASSWORD { 'password' | DISABLE } + | [ ENCRYPTED | UNENCRYPTED ] IDENTIFIED BY { 'password' [ REPLACE 'old_password' ] | DISABLE } + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | PERM SPACE 'spacelimit' + | ACCOUNT { LOCK | UNLOCK } + | PGUSER + ``` + +- Change the username. + + ``` + ALTER USER user_name + RENAME TO new_name; + ``` + +- Change the value of a specified parameter associated with the user. + + ``` + ALTER USER user_name + SET configuration_parameter { { TO | = } { value | DEFAULT } | FROM CURRENT }; + ``` + +- Reset the value of a specified parameter associated with the user. + + ``` + ALTER USER user_name + RESET { configuration_parameter | ALL }; + ``` + + +## Parameter Description + +- **user\_name** + + Specifies the current username. + + Value range: an existing username + +- **new\_password** + + Specifies a new password. + + The new password must: + + - Differ from the old password. + - Contain at least eight characters. This is the default length. + - Differ from the username or the username spelled backward. + - Contain at least three types of the following four types of characters: uppercase characters \(A to Z\), lowercase characters \(a to z\), digits \(0 to 9\), and special characters, including: \~!@\#$%^&\*\(\)-\_=+\\|\[\{\}\];:,<.\>/? + + Value range: a string + +- **old\_password** + + Specifies the old password. + +- **ACCOUNT LOCK | ACCOUNT UNLOCK** + - **ACCOUNT LOCK**: locks an account to forbid login to databases. + - **ACCOUNT UNLOCK**: unlocks an account to allow login to databases. + +- **PGUSER** + + In the current version, the **PGUSER** attribute of a user cannot be modified. + + +For details about other parameters, see "Parameter Description" in [CREATE ROLE](create-role.md) and [ALTER ROLE](alter-role.md). + +## Example + +See [Example](create-user.md#en-us_topic_0237122125_en-us_topic_0059778166_sfbca773f5bcd4799b3ea668b3eb074fa) in **CREATE USER**. + +## Helpful Links + +[CREATE ROLE](create-role.md), [CREATE USER](create-user.md), and [DROP USER](drop-user.md) + diff --git a/content/en/docs/Developerguide/alter-view.md b/content/en/docs/Developerguide/alter-view.md new file mode 100644 index 000000000..685c5683a --- /dev/null +++ b/content/en/docs/Developerguide/alter-view.md @@ -0,0 +1,130 @@ +# ALTER VIEW + +## Function + +**ALTER VIEW** modifies all auxiliary attributes of a view. \(To modify the query definition of a view, use **CREATE OR REPLACE VIEW**.\) + +## Precautions + +- Only the owner of a view can use **ALTER VIEW**. +- To change the schema of a view, you must have the **CREATE** permission on the new schema. +- To change the owner of a view, you must be a direct or indirect member of the new owning role, and the member must have the **CREATE** permission on the view's schema. +- An administrator can change the owner relationship of any view. + +## Syntax + +- Set the default value of a view column. 
+ + ``` + ALTER VIEW [ IF EXISTS ] view_name + ALTER [ COLUMN ] column_name SET DEFAULT expression; + ``` + +- Remove the default value of a view column. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + ALTER [ COLUMN ] column_name DROP DEFAULT; + ``` + +- Change the owner of a view. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + OWNER TO new_owner; + ``` + +- Rename a view. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + RENAME TO new_name; + ``` + +- Set the schema of a view. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + SET SCHEMA new_schema; + ``` + +- Set the options of a view. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + SET ( { view_option_name [ = view_option_value ] } [, ... ] ); + ``` + +- Reset the options of a view. + + ``` + ALTER VIEW [ IF EXISTS ] view_name + RESET ( view_option_name [, ... ] ); + ``` + + +## Parameter Description + +- **IF EXISTS** + + If this option is used, no error is generated when the view does not exist, and only a message is displayed. + +- **view\_name** + + Specifies the view name, which can be schema-qualified. + + Value range: a string. It must comply with the naming convention rule. + +- **column\_name** + + Specifies an optional list of names to be used for columns of the view. If not given, the column names are deduced from the query. + + Value range: a string. It must comply with the naming convention rule. + +- **SET/DROP DEFAULT** + + Sets or deletes the default value of a column. this parameter does not take effect. + +- **new\_owner** + + Specifies the new owner of a view. + +- **new\_name** + + Specifies the new view name. + +- **new\_schema** + + Specifies the new schema of the view. + +- **view\_option\_name \[ = view\_option\_value \]** + + Specifies an optional parameter for a view. + + Currently, **view\_option\_name** supports only the **security\_barrier** parameter. This parameter is used when the view attempts to provide row-level security. + + Value range: Boolean type, **TRUE**, and **FALSE**. + + +## Examples + +``` +-- Create a view consisting of rows with c_customer_sk less than 150. +postgres=# CREATE VIEW tpcds.customer_details_view_v1 AS + SELECT * FROM tpcds.customer + WHERE c_customer_sk < 150; + +-- Rename a view. +postgres=# ALTER VIEW tpcds.customer_details_view_v1 RENAME TO customer_details_view_v2; + +-- Change the schema of a view. +postgres=# ALTER VIEW tpcds.customer_details_view_v2 SET schema public; + +-- Delete a view. +postgres=# DROP VIEW public.customer_details_view_v2; +``` + +## Helpful Links + +[CREATE VIEW](create-view.md) and [DROP VIEW](drop-view.md) + diff --git a/content/en/docs/Developerguide/analyze-analyse.md b/content/en/docs/Developerguide/analyze-analyse.md new file mode 100644 index 000000000..ba739827f --- /dev/null +++ b/content/en/docs/Developerguide/analyze-analyse.md @@ -0,0 +1,192 @@ +# ANALYZE | ANALYSE + +## Function + +**ANALYZE** collects statistics about ordinary tables in a database, and stores the results in the **PG\_STATISTIC** system catalog. The execution plan generator uses these statistics to determine which one is the most effective execution plan. + +If no parameter is specified, **ANALYZE** analyzes each table and partitioned table in the current database. You can also specify the **table\_name**, **column**, and **partition\_name** parameters to restrict the analysis to a specific table, column, or partitioned table. + +**ANALYZE | ANALYSE VERIFY** is used to check whether data files of common tables \(row-store and column-store tables\) in a database are damaged. 
+ +## Precautions + +Non-temporary tables cannot be analyzed in an anonymous block, transaction block, function, or stored procedure. Temporary tables in a stored procedure can be analyzed but their statistics updates cannot be rolled back. + +The **ANALYZE VERIFY** operation is used to detect abnormal scenarios. The **RELEASE** version is required. In the **ANALYZE VERIFY** scenario, remote read is not triggered. Therefore, the remote read parameter does not take effect. If the system detects that a page is damaged due to an error in a key system table, the system directly reports an error and does not continue the detection. + +## Syntax + +- Collect statistics information about a table. + + ``` + { ANALYZE | ANALYSE } [ VERBOSE ] + [ table_name [ ( column_name [, ...] ) ] ]; + ``` + + +- Collect statistics about a partitioned table. + + ``` + { ANALYZE | ANALYSE } [ VERBOSE ] + [ table_name [ ( column_name [, ...] ) ] ] + PARTITION ( patrition_name ) ; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >An ordinary partitioned table supports the syntax but not the function of collecting statistics about specified partitions. + + +- Collect statistics about multiple columns. + + ``` + {ANALYZE | ANALYSE} [ VERBOSE ] + table_name (( column_1_name, column_2_name [, ...] )); + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- When collecting statistics about multiple columns, set GUC parameter [default\_statistics\_target](other-optimizer-options.md#en-us_topic_0237124719_en-us_topic_0059779049_se18c86fcdf5e4a22870f71187436d815) to a negative value to sample data in percentage. + >- The statistics about a maximum of 32 columns can be collected at a time. + >- You are not allowed to collect statistics about multiple columns in system catalogs. + + +- Check the data files in the current database. + + ``` + {ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE}; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- In fast mode, DML operations need to be performed on the tables to be verified concurrently. As a result, an error is reported during the verification. In the current fast mode, data is directly read from the disk. When other threads modify files concurrently, the obtained data is incorrect. Therefore, you are advised to perform the verification offline. + >- You can perform operations on the entire database. Because a large number of tables are involved, you are advised to save the result **gsql -d database -p port -f "verify.sql"\> verify\_warning.txt 2\>&1** in redirection mode. + >- Temporary tables and unlogged tables are not supported. + >- NOTICE is used to check only tables that are visible to external systems. The detection of internal tables is included in the external tables on which NOTICE depends and is not displayed externally. + >- This statement can be executed with error tolerance. The **Assert** of the debug version may cause the core to fail to execute commands. Therefore, you are advised to perform the operations in release mode. + >- If a key system table is damaged during a full database operation, an error is reported and the operation stops. + +- Check data files of tables and indexes. + + ``` + {ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE} table_name|index_name [CASCADE]; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- Operations on ordinary tables and index tables are supported, but **CASCADE** operations on indexes of index tables are not supported. 
The **CASCADE** mode is used to process all index tables of the primary table. When the index tables are checked separately, the **CASCADE** mode is not required. + >- Temporary tables and unlogged tables are not supported. + >- When the primary table is checked, the internal tables of the primary table, such as the toast table and cudesc table, are also checked. + >- When the system displays a message indicating that the index table is damaged, you are advised to run the **reindex** command to recreate the index. + +- Check the data files of the table partition. + +``` +{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE} table_name PARTITION {(patrition_name)}[CASCADE]; +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- You can check a single partition of a table, but cannot perform the **CASCADE** operation on the indexes of an index table. +>- Temporary tables and unlogged tables are not supported. + +## Parameter Description + +- **VERBOSE** + + Enables the display of progress messages. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If **VERBOSE** is specified, **ANALYZE** displays the progress information, indicating the table that is being processed. Statistics about tables are also displayed. + +- **table\_name** + + Specifies the name \(possibly schema-qualified\) of a specific table to analyze. If omitted, all regular tables \(but not foreign tables\) in the current database are analyzed. + + Currently, you can use **ANALYZE** to collect statistics only from row-store tables and column-store tables. + + Value range: an existing table name + +- **column\_name**, column\_1\_name, column\_2\_name + + Specifies the name of a specific column to analyze. All columns are analyzed by default. + + Value range: an existing column name + +- **partition\_name** + + Assumes the table is a partitioned table. You can specify **partition\_name** following the keyword **PARTITION** to analyze the statistics of this table. Currently, **ANALYZE** can be performed on partitioned tables, but statistics of specified partitions cannot be analyzed. + + Value range: a partition name of a table + +- **index\_name** + + Specifies the name of the specific index table to be analyzed \(possibly schema-qualified\). + + Value range: an existing table name + +- **FAST|COMPLETE** + + For a row-store table, the **FAST** mode verifies the CRC and page header of the row-store table. If the verification fails, an alarm is generated. In **COMPLETE** mode, the pointer and tuple of the row-store table are parsed and verified. For a column-store table, the **FAST** mode verifies the CRC and magic of the column-store table. If the verification fails, an alarm is generated. In **COMPLETE** mode, the CU of the column-store table is parsed and verified. + +- **CASCADE** + + In **CASCADE** mode, all indexes of the current table are verified. + + +## Examples + +-- Create a table. + +``` +postgres=# CREATE TABLE customer_info +( +WR_RETURNED_DATE_SK INTEGER , +WR_RETURNED_TIME_SK INTEGER , +WR_ITEM_SK INTEGER NOT NULL, +WR_REFUNDED_CUSTOMER_SK INTEGER +) +; +``` + +-- Create a partitioned table. 
+ +``` +postgres=# CREATE TABLE customer_par +( +WR_RETURNED_DATE_SK INTEGER , +WR_RETURNED_TIME_SK INTEGER , +WR_ITEM_SK INTEGER NOT NULL, +WR_REFUNDED_CUSTOMER_SK INTEGER +) +PARTITION BY RANGE(WR_RETURNED_DATE_SK) +( +PARTITION P1 VALUES LESS THAN(2452275), +PARTITION P2 VALUES LESS THAN(2452640), +PARTITION P3 VALUES LESS THAN(2453000), +PARTITION P4 VALUES LESS THAN(MAXVALUE) +) +ENABLE ROW MOVEMENT; +``` + +-- Run **ANALYZE** to update statistics. + +``` +postgres=# ANALYZE customer; +``` + +-- Run **ANALYZE VERBOSE** statement to update statistics and display table information. + +``` +postgres=# ANALYZE VERBOSE customer_info; +INFO: analyzing "cstore.pg_delta_3394584009"(cn_5002 pid=53078) +INFO: analyzing "public.customer_info"(cn_5002 pid=53078) +INFO: analyzing "public.customer_info" inheritance tree(cn_5002 pid=53078) +ANALYZE +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If any environment-related fault occurs, check the logs of the primary node of the database. + +-- Delete the table. + +``` +postgres=# DROP TABLE customer; +postgres=# DROP TABLE customer_par; +``` + diff --git a/content/en/docs/Developerguide/analyze-table.md b/content/en/docs/Developerguide/analyze-table.md new file mode 100644 index 000000000..a19d664df --- /dev/null +++ b/content/en/docs/Developerguide/analyze-table.md @@ -0,0 +1,40 @@ +# ANALYZE Table + +The execution plan generator needs to use table statistics to generate the most effective query execution plan to improve query performance. After data is imported, you are advised to run the **ANALYZE** statement to update table statistics. The statistics are stored in the system catalog **PG\_STATISTIC**. + +## ANALYZE Table + +**ANALYZE** supports row-store and column-store tables. **ANALYZE** can also collect statistics about specified columns of a local table. For details on **ANALYZE**, see [ANALYZE | ANALYSE](analyze-analyse.md). + +1. Update table statistics. + + Do **ANALYZE** to the **product\_info** table. + + ``` + postgres=# ANALYZE product_info; + ``` + + ``` + ANALYZE + ``` + + +## autoanalyze + +openGauss provides the GUC parameter [autovacuum](automatic-vacuuming.md#en-us_topic_0237124730_en-us_topic_0059778244_s995913ca9df54ae5bb488d1e810bd824) to specify whether to enable the autovacuum function of the database. + +If **autovacuum** is set to **on**, the system will start the autovacuum thread to automatically analyze tables when the data volume in the table reaches the threshold. This is the autoanalyze function. + +- For an empty table, when the number of rows inserted to it is greater than 50, **ANALYZE** is automatically triggered. +- For a table containing data, the threshold is 50 + 10% x **reltuples**, where **reltuples** indicates the total number of rows in the table. + +The autovacuum function also depends on the following two GUC parameters in addition to **autovacuum**: + +- [track\_counts](query-and-index-statistics-collector.md#en-us_topic_0237124727_en-us_topic_0059779313_s3f4fb0b1004041f69e1454c701952411): This parameter must be set to **on** to enable statistics collection about the database. +- [autovacuum\_max\_workers](automatic-vacuuming.md#en-us_topic_0237124730_en-us_topic_0059778244_s76932f79410248ba8923017d19982673): This parameter must be set to a value greater than **0** to specify the maximum number of concurrent autovacuum threads. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- The autoanalyze function supports the default sampling mode but not percentage sampling. 
+>- The autoanalyze function does not collect multi-column statistics, which only supports percentage sampling. +>- The autoanalyze function supports row-store and column-store tables and does not support foreign tables, temporary tables, unlogged tables, and TOAST tables. + diff --git a/content/en/docs/Developerguide/analyzing-hardware-bottlenecks.md b/content/en/docs/Developerguide/analyzing-hardware-bottlenecks.md new file mode 100644 index 000000000..89f461032 --- /dev/null +++ b/content/en/docs/Developerguide/analyzing-hardware-bottlenecks.md @@ -0,0 +1,13 @@ +# Analyzing Hardware Bottlenecks + +The CPU, memory, I/O, and network resource usage of each node in openGauss are obtained to check whether these resources are fully used and whether any bottleneck exists. + +- **[CPU](cpu.md)** +You can run the **top** command to check the CPU usage of each node in openGauss and analyze whether performance bottleneck caused by heavy CPU load exists. +- **[Memory](memory.md)** +Run the **top** command to check the memory usage of each node in openGauss and analyze whether a performance bottleneck occurs due to high memory usage. +- **[I/O](i-o.md)** +You can run the **iostat** or **pidstat** command, or use openGauss heath check tools to check the I/O usage and throughput on each node in openGauss and analyze whether performance bottleneck caused by I/O exists. +- **[Network](network.md)** +You can run the **sar** or **ifconfig** command to check the network status on each node in openGauss and analyze whether performance bottlenecks caused by network faults occur. + diff --git a/content/en/docs/Developerguide/anonymous-blocks.md b/content/en/docs/Developerguide/anonymous-blocks.md new file mode 100644 index 000000000..9eb9cb9a9 --- /dev/null +++ b/content/en/docs/Developerguide/anonymous-blocks.md @@ -0,0 +1,21 @@ +# Anonymous Blocks + +An anonymous block applies to a script infrequently executed or a one-off activity. An anonymous block is executed in a session and is not stored. + +## Syntax + +[Figure 1](#en-us_topic_0237122218_en-us_topic_0059779171_f19ed9f384e0646f29744951d7eec8c3b) shows the syntax diagrams for an anonymous block. + +**Figure 1** anonymous\_block::= +![](figures/anonymous_block.png "anonymous_block") + +Details about the syntax diagram are as follows: + +- The execute part of an anonymous block starts with a **BEGIN** statement, has a break with an **END** statement, and ends with a semicolon \(;\). Type a slash \(/\) and press **Enter** to execute the statement. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >The terminator "/" must be written in an independent row. + +- The declaration section includes the variable definition, type, and cursor definition. +- A simplest anonymous block does not execute any commands. At least one statement, even a **NULL** statement, must be presented in any implementation blocks. 
+ diff --git a/content/en/docs/Developerguide/api-reference.md b/content/en/docs/Developerguide/api-reference.md new file mode 100644 index 000000000..153cde565 --- /dev/null +++ b/content/en/docs/Developerguide/api-reference.md @@ -0,0 +1,9 @@ +# API Reference + +- **[JDBC](jdbc.md)** + +- **[ODBC](odbc.md)** + +- **[libpq](libpq.md)** + + diff --git a/content/en/docs/Developerguide/apis.md b/content/en/docs/Developerguide/apis.md new file mode 100644 index 000000000..e41a92168 --- /dev/null +++ b/content/en/docs/Developerguide/apis.md @@ -0,0 +1,36 @@ +# APIs + +You can use standard database APIs, such as **ODBC** and **JDBC**, to develop openGauss-based applications. + +## Supported APIs + +Each application is an independent openGauss development project. APIs alleviate applications from directly operating in databases, and enhance the database portability, extensibility, and maintainability. [Table 1](#en-us_topic_0237120293_en-us_topic_0059777757_tc44f4815cb564ea182d5864daa2709b4) lists the APIs supported by openGauss and the download addresses. + +**Table 1** Database APIs + + + + + + + + + + + + + +

+| API | How to Obtain |
+| --- | ------------- |
+| ODBC |  |
+| JDBC | Driver: openGauss-x.x-EULER-64bit-Jdbc.tar.gz<br>Driver: org.postgresql.Driver |
+ +You can use **JDBC** and **ODBC** to connect to the database. Therefore, you need to [configure a remote connection](configuring-a-remote-connection.md) in openGauss. + +For details about more APIs, see [Application Development Guide](application-development-guide.md). + diff --git a/content/en/docs/Developerguide/appendix.md b/content/en/docs/Developerguide/appendix.md new file mode 100644 index 000000000..2a6ddaa08 --- /dev/null +++ b/content/en/docs/Developerguide/appendix.md @@ -0,0 +1,9 @@ +# Appendix + +- **[GIN Indexes](gin-indexes.md)** + +- **[Extended Functions](extended-functions.md)** + +- **[Extended Syntax](extended-syntax.md)** + + diff --git a/content/en/docs/Developerguide/application-development-guide.md b/content/en/docs/Developerguide/application-development-guide.md new file mode 100644 index 000000000..3465e8acd --- /dev/null +++ b/content/en/docs/Developerguide/application-development-guide.md @@ -0,0 +1,13 @@ +# Application Development Guide + +- **[Development Specifications](development-specifications.md)** + +- **[Development Based on JDBC](development-based-on-jdbc.md)** + +- **[Development Based on ODBC](development-based-on-odbc.md)** + +- **[Development Based on libpq](development-based-on-libpq.md)** + +- **[Commissioning](commissioning.md)** + + diff --git a/content/en/docs/Developerguide/archiving.md b/content/en/docs/Developerguide/archiving.md new file mode 100644 index 000000000..a1d85836e --- /dev/null +++ b/content/en/docs/Developerguide/archiving.md @@ -0,0 +1,50 @@ +# Archiving + +## archive\_mode + +**Parameter description**: Specifies whether to archive WALs. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>When **[wal\_level](settings.md#en-us_topic_0237124707_en-us_topic_0059778393_s2c76f5957066407a959191148f2c780f)** is set to **minimal**, the **archive\_mode** parameter is unavailable. + +**Value range**: Boolean + +- **on** indicates that the archiving is enabled. +- **off** indicates that the archiving is disabled. + +**Default value**: **off** + +## archive\_command + +**Parameter description:** Specifies the command set by the administrator to archive WALs. You are advised to set the archive log path to an absolute path. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- Any **%p** in the string is replaced by the absolute path of the file to archive, and any **%f** is replaced by only the file name. \(The relative path is relative to the data directory.\) Use **%%** to embed an actual **%** character in the command. +>- This command returns zero only if it succeeds. The following shows the command: +> ``` +> archive_command = 'cp --remove-destination %p /mnt/server/archivedir/%f' +> ``` +>- **--remove-destination** indicates that files will be overwritten during the archiving. + +**Value range**: a string + +**Default value:** **\(disabled\)** + +## archive\_timeout + +**Parameter description**: Specifies the archiving period. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). 
+ +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- The server is forced to switch to a new WAL segment file when the period specified by this parameter has elapsed since the last file switch. +>- Archived files that are closed early due to a forced switch are still of the same length as full files. Therefore, a very short **archive\_timeout** will bloat the archive storage. You are advised to set **archive\_timeout** to **60s**. + +**Value range**: an integer ranging from 0 to _INT\_MAX_. The unit is second. **0** indicates that archiving timeout is disabled. + +**Default value**: **0** + diff --git a/content/en/docs/Developerguide/array-expressions.md b/content/en/docs/Developerguide/array-expressions.md new file mode 100644 index 000000000..b1d103a32 --- /dev/null +++ b/content/en/docs/Developerguide/array-expressions.md @@ -0,0 +1,94 @@ +# Array Expressions + +## IN + +_expression _**IN **_\(value \[, ...\]\)_ + +The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list. If the content in the list meets the expression result on the left, the result of **IN** is **true**. If no result meets the requirements, the result of **IN** is **false**. + +For example: + +``` +postgres=# SELECT 8000+500 IN (10000, 9000) AS RESULT; + result +---------- + f +(1 row) +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If the expression result is null or the expression list does not meet the expression conditions and at least one empty value is returned for the expression list on the right, the result of **IN** is **null** rather than **false**. This method is consistent with the Boolean rules used when SQL statements return empty values. + +## NOT IN + +_expression _**NOT IN**_ \(value \[, ...\]\)_ + +The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list. If the content in the list does not meet the expression result on the left, the result of **NOT IN** is **true**. If any content meets the expression result, the result of **NOT IN** is **false**. + +For example: + +``` +postgres=# SELECT 8000+500 NOT IN (10000, 9000) AS RESULT; + result +---------- + t +(1 row) +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If the query statement result is null or the expression list does not meet the expression conditions and at least one empty value is returned for the expression list on the right, the result of **NOT IN** is **null** rather than **false**. This method is consistent with the Boolean rules used when SQL statements return empty values. + +In all situations, **X NOT IN Y** equals to **NOT\(X IN Y\)**. + +## ANY/SOME \(array\) + +_expression operator _**ANY **_\(array expression\)_ + +_expression operator _**SOME **_\(array expression\)_ + +``` +postgres=# SELECT 8000+500 < SOME (array[10000,9000]) AS RESULT; + result +---------- + t +(1 row) + +``` + +``` +postgres=# SELECT 8000+500 < ANY (array[10000,9000]) AS RESULT; + result +---------- + t +(1 row) +``` + +The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. + +- If at least one comparison result is true, the result of **ANY** is **true**. +- If no comparison result is true, the result of ANY is false. 
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If no comparison result is true and the array expression generates at least one null value, the value of ANY is NULL, rather than false. This method is consistent with the Boolean rules used when SQL statements return empty values. + +**SOME** is a synonym of **ANY**. + +## ALL \(array\) + +_expression operator _**ALL **_\(array expression\)_ + +The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. + +- The result of **ALL** is **true** if all comparisons yield **true** \(including the case where the array has zero elements\). +- The result of **ALL** is **false** if one or multiple comparisons yield **false**. + +If the array expression yields a null array, the result of **ALL** will be null. If the left-hand expression yields null, the result of **ALL** is ordinarily null \(though a non-strict comparison operator could possibly yield a different result\). Also, if the right-hand array contains any null elements and no false comparison result is obtained, the result of **ALL** will be null, not true \(again, assuming a strict comparison operator\). This method is consistent with the Boolean rules used when SQL statements return empty values. + +``` +postgres=# SELECT 8000+500 < ALL (array[10000,9000]) AS RESULT; + result +---------- + t +(1 row) +``` + diff --git a/content/en/docs/Developerguide/array-functions-and-operators.md b/content/en/docs/Developerguide/array-functions-and-operators.md new file mode 100644 index 000000000..7c7f20603 --- /dev/null +++ b/content/en/docs/Developerguide/array-functions-and-operators.md @@ -0,0 +1,386 @@ +# Array Functions and Operators + +## Array Operators + +- = + + Description: Specifies whether two arrays are equal. + + For example: + + ``` + postgres=# SELECT ARRAY[1.1,2.1,3.1]::int[] = ARRAY[1,2,3] AS RESULT ; + result + -------- + t + (1 row) + ``` + +- <\> + + Description: Specifies whether two arrays are not equal. + + For example: + + ``` + postgres=# SELECT ARRAY[1,2,3] <> ARRAY[1,2,4] AS RESULT; + result + -------- + t + (1 row) + ``` + +- < + + Description: Specifies whether an array is less than another. + + For example: + + ``` + postgres=# SELECT ARRAY[1,2,3] < ARRAY[1,2,4] AS RESULT; + result + -------- + t + (1 row) + ``` + +- \> + + Description: Specifies whether an array is greater than another. + + For example: + + ``` + postgres=# SELECT ARRAY[1,4,3] > ARRAY[1,2,4] AS RESULT; + result + -------- + t + (1 row) + ``` + +- <= + + Description: Specifies whether an array is less than another. + + For example: + + ``` + postgres=# SELECT ARRAY[1,2,3] <= ARRAY[1,2,3] AS RESULT; + result + -------- + t + (1 row) + ``` + +- \>= + + Description: Specifies whether an array is greater than or equal to another. + + For example: + + ``` + postgres=# SELECT ARRAY[1,4,3] >= ARRAY[1,4,3] AS RESULT; + result + -------- + t + (1 row) + ``` + +- @\> + + Description: Specifies whether an array contains another. + + For example: + + ``` + postgres=# SELECT ARRAY[1,4,3] @> ARRAY[3,1] AS RESULT; + result + -------- + t + (1 row) + ``` + +- <@ + + Description: Specifies whether an array is contained in another. 
+ + For example: + + ``` + postgres=# SELECT ARRAY[2,7] <@ ARRAY[1,7,4,2,6] AS RESULT; + result + -------- + t + (1 row) + ``` + +- && + + Description: Specifies whether an array overlaps another \(have common elements\). + + For example: + + ``` + postgres=# SELECT ARRAY[1,4,3] && ARRAY[2,1] AS RESULT; + result + -------- + t + (1 row) + ``` + +- || + + Description: Array-to-array concatenation + + For example: + + ``` + postgres=# SELECT ARRAY[1,2,3] || ARRAY[4,5,6] AS RESULT; + result + --------------- + {1,2,3,4,5,6} + (1 row) + ``` + + ``` + postgres=# SELECT ARRAY[1,2,3] || ARRAY[[4,5,6],[7,8,9]] AS RESULT; + result + --------------------------- + {{1,2,3},{4,5,6},{7,8,9}} + (1 row) + ``` + +- || + + Description: Element-to-array concatenation + + For example: + + ``` + postgres=# SELECT 3 || ARRAY[4,5,6] AS RESULT; + result + ----------- + {3,4,5,6} + (1 row) + ``` + +- || + + Description: Array-to-element concatenation + + For example: + + ``` + postgres=# SELECT ARRAY[4,5,6] || 7 AS RESULT; + result + ----------- + {4,5,6,7} + (1 row) + ``` + + +Array comparisons compare the array contents element-by-element, using the default B-tree comparison function for the element data type. In multidimensional arrays, the elements are accessed in row-major order. If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order. + +## Array Functions + +- array\_append\(anyarray, anyelement\) + + Description: Appends an element to the end of an array, and only supports dimension-1 arrays. + + Return type: anyarray + + For example: + + ``` + postgres=# SELECT array_append(ARRAY[1,2], 3) AS RESULT; + result + --------- + {1,2,3} + (1 row) + ``` + +- array\_prepend\(anyelement, anyarray\) + + Description: Appends an element to the beginning of an array, and only supports dimension-1 arrays. + + Return type: anyarray + + For example: + + ``` + postgres=# SELECT array_prepend(1, ARRAY[2,3]) AS RESULT; + result + --------- + {1,2,3} + (1 row) + ``` + +- array\_cat\(anyarray, anyarray\) + + Description: Concatenates two arrays, and supports multi-dimensional arrays. + + Return type: anyarray + + For example: + + ``` + postgres=# SELECT array_cat(ARRAY[1,2,3], ARRAY[4,5]) AS RESULT; + result + ------------- + {1,2,3,4,5} + (1 row) + + postgres=# SELECT array_cat(ARRAY[[1,2],[4,5]], ARRAY[6,7]) AS RESULT; + result + --------------------- + {{1,2},{4,5},{6,7}} + (1 row) + ``` + +- array\_ndims\(anyarray\) + + Description: Returns the number of dimensions of the array. + + Return type: int + + For example: + + ``` + postgres=# SELECT array_ndims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT; + result + -------- + 2 + (1 row) + ``` + +- array\_dims\(anyarray\) + + Description: Returns the low-order flag bits and high-order flag bits of each dimension in an array. + + Return type: text + + For example: + + ``` + postgres=# SELECT array_dims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT; + result + ------------ + [1:2][1:3] + (1 row) + ``` + +- array\_length\(anyarray, int\) + + Description: Returns the length of the requested array dimension. **int** is the requested array dimension. 
+ + Return type: int + + For example: + + ``` + postgres=# SELECT array_length(array[1,2,3], 1) AS RESULT; + result + -------- + 3 + (1 row) + + postgres=# SELECT array_length(array[[1,2,3],[4,5,6]], 2) AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- array\_lower\(anyarray, int\) + + Description: Returns lower bound of the requested array dimension. **int** is the requested array dimension. + + Return type: int + + For example: + + ``` + postgres=# SELECT array_lower('[0:2]={1,2,3}'::int[], 1) AS RESULT; + result + -------- + 0 + (1 row) + ``` + +- array\_upper\(anyarray, int\) + + Description: Returns upper bound of the requested array dimension. **int** is the requested array dimension. + + Return type: int + + For example: + + ``` + postgres=# SELECT array_upper(ARRAY[1,8,3,7], 1) AS RESULT; + result + -------- + 4 + (1 row) + ``` + +- array\_to\_string\(anyarray, text \[, text\]\) + + Description: Uses the first **text** as the new delimiter and the second **text** to replace **NULL** values. + + Return type: text + + For example: + + ``` + postgres=# SELECT array_to_string(ARRAY[1, 2, 3, NULL, 5], ',', '*') AS RESULT; + result + ----------- + 1,2,3,*,5 + (1 row) + ``` + +- string\_to\_array\(text, text \[, text\]\) + + Description: Uses the second **text** as the new delimiter and the third **text** as the substring to be replaced by **NULL** values. A substring can be replaced by **NULL** values only when it is the same as the third **text**. + + Return type: text\[\] + + For example: + + ``` + postgres=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'yy') AS RESULT; + result + -------------- + {xx,NULL,zz} + (1 row) + postgres=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'y') AS RESULT; + result + ------------ + {xx,yy,zz} + (1 row) + ``` + +- unnest\(anyarray\) + + Description: Expands an array to a set of rows. + + Return type: setof anyelement + + For example: + + ``` + postgres=# SELECT unnest(ARRAY[1,2]) AS RESULT; + result + -------- + 1 + 2 + (2 rows) + ``` + + +In **string\_to\_array**, if the delimiter parameter is NULL, each character in the input string will become a separate element in the resulting array. If the delimiter is an empty string, then the entire input string is returned as a one-element array. Otherwise the input string is split at each occurrence of the delimiter string. + +In **string\_to\_array**, if the null-string parameter is omitted or NULL, none of the substrings of the input will be replaced by NULL. + +In **array\_to\_string**, if the null-string parameter is omitted or NULL, any null elements in the array are simply skipped and not represented in the output string. + diff --git a/content/en/docs/Developerguide/arrays-and-records.md b/content/en/docs/Developerguide/arrays-and-records.md new file mode 100644 index 000000000..f7bf3c14b --- /dev/null +++ b/content/en/docs/Developerguide/arrays-and-records.md @@ -0,0 +1,7 @@ +# Arrays and Records + +- **[Arrays](arrays.md)** + +- **[record](record.md)** + + diff --git a/content/en/docs/Developerguide/arrays.md b/content/en/docs/Developerguide/arrays.md new file mode 100644 index 000000000..6a8326563 --- /dev/null +++ b/content/en/docs/Developerguide/arrays.md @@ -0,0 +1,29 @@ +# Arrays + +## Use of Array Types + +Before the use of arrays, an array type needs to be defined: + +Define an array type immediately after the **AS** keyword in a stored procedure. 
The method is as follows: + +``` +TYPE array_type IS VARRAY(size) OF data_type; +``` + +In the preceding information: + +- **array\_type**: indicates the name of the array type to be defined. +- **VARRAY**: indicates the array type to be defined. +- **size**: indicates the maximum number of members in the array type to be defined. The value is a positive integer. +- **data\_type**: indicates the types of members in the array type to be created. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- In openGauss, an array automatically increases. If an access violation occurs, a null value is returned, and no error message is reported. +>- The scope of an array type defined in a stored procedure takes effect only in this storage process. +>- It is recommended that you use one of the preceding methods to define an array type. If both methods are used to define the same array type, openGauss prefers the array type defined in a stored procedure to declare array variables. + +openGauss supports the access of contents in an array by using parentheses, and the **extend**, **count**, **first**, and **last** functions. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If the stored procedure contains the DML statement \(SELECT, UPDATE, INSERT, or DELETE\), DML statements can access array elements only using brackets. In this way, it may be separated from the function expression area. + diff --git a/content/en/docs/Developerguide/assignment-statements.md b/content/en/docs/Developerguide/assignment-statements.md new file mode 100644 index 000000000..a916f6090 --- /dev/null +++ b/content/en/docs/Developerguide/assignment-statements.md @@ -0,0 +1,26 @@ +# Assignment Statements + +## Syntax + +[Figure 1](#en-us_topic_0237122222_en-us_topic_0059778597_f1087f61f4ec24addbb3b79a2ccf21917) shows the syntax diagram for assigning a value to a variable. + +**Figure 1** assignment\_value::= +![](figures/assignment_value.png "assignment_value") + +The above syntax diagram is explained as follows: + +- **variable\_name** indicates the name of a variable. +- **value** can be a value or an expression. The type of **value** must be compatible with the type of **variable\_name**. + +## Example + +``` +postgres=# DECLARE + emp_id INTEGER := 7788; --Assignment +BEGIN + emp_id := 5; --Assignment + emp_id := 5*7784; +END; +/ +``` + diff --git a/content/en/docs/Developerguide/asynchronous-i-o-operations.md b/content/en/docs/Developerguide/asynchronous-i-o-operations.md new file mode 100644 index 000000000..7438a2f77 --- /dev/null +++ b/content/en/docs/Developerguide/asynchronous-i-o-operations.md @@ -0,0 +1,111 @@ +# Asynchronous I/O Operations + +## enable\_adio\_debug + +**Parameter description**: Specifies whether O&M personnel are allowed to generate some ADIO logs to locate ADIO issues. This parameter is used only by developers. Common users are advised not to use it. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** or **true** indicates that generation of ADIO logs is allowed. +- **off** or **false** indicates that generation of ADIO logs is disallowed. + +**Default value**: **off** + +## enable\_adio\_function + +**Parameter description**: Specifies whether to enable the ADIO function. + +This parameter is a POSTMASTER parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** or **true** indicates that the function is enabled. +- **off** or **false** indicates that the function is disabled. + +**Default value**: **off** + +## enable\_fast\_allocate + +**Parameter description**: Specifies whether the quick disk space allocation is enabled. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). This function can be enabled only in the XFS file system. + +**Value range**: Boolean + +- **on** or **true** indicates that the function is enabled. +- **off** or **false** indicates that the function is disabled. + +**Default value**: **off** + +## prefetch\_quantity + +**Parameter description**: Specifies the amount of the I/O that the row-store prefetches using the ADIO. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 128 to 131072. The unit is 8 KB. + +**Default value**: **32MB** \(4096 x 8 KB\) + +## backwrite\_quantity + +**Parameter description**: Specifies the amount of I/O that the row-store writes using the ADIO. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 128 to 131072. The unit is 8 KB. + +**Default value**: **8MB** \(1024 x 8 KB\) + +## cstore\_prefetch\_quantity + +**Parameter description**: Specifies the amount of I/O that the column-store prefetches using the ADIO. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1024 to 1048576. The unit is KB. + +**Default value**: **32MB** + +## cstore\_backwrite\_quantity + +**Parameter description**: Specifies the amount of I/O that the column-store writes using the ADIO. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1024 to 1048576. The unit is KB. + +**Default value**: **8MB** + +## cstore\_backwrite\_max\_threshold + +**Parameter description**: Specifies the maximum amount of buffer I/O that the column-store writes in the database using the ADIO. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 4096 to _INT\_MAX_/2. The unit is KB. + +**Default value**: **2GB** + +## fast\_extend\_file\_size + +**Parameter description**: Specifies the disk size that the row-store pre-scales using the ADIO. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). 
+ +**Value range**: an integer ranging from 1024 to 1048576. The unit is KB. + +**Default value**: **8MB** + +## effective\_io\_concurrency + +**Parameter description**: Specifies the number of requests that can be simultaneously processed by a disk subsystem. For the RAID array, the parameter value must be the number of disk drive spindles in the array. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 1000 + +**Default value**: **1** + diff --git a/content/en/docs/Developerguide/audit-switch.md b/content/en/docs/Developerguide/audit-switch.md new file mode 100644 index 000000000..f377d3221 --- /dev/null +++ b/content/en/docs/Developerguide/audit-switch.md @@ -0,0 +1,107 @@ +# Audit Switch + +## audit\_enabled + +**Parameter description**: Specifies whether to enable or disable the audit process. After the audit process is enabled, the auditing information written by the background process can be read from the pipe and written into audit files. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the auditing function is enabled. +- **off** indicates that the auditing function is disabled. + +**Default value**: **on** + +## audit\_directory + +**Parameter description**: Specifies the storage directory of audit files. It is a customizable path related to the data directory. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +**Default value:** **pg\_audit** If **om** is used for openGauss deployment, audit logs are stored in **$GAUSSLOG/pg\_audit/_Instance name_**. + +## audit\_data\_format + +**Parameter description**: Audits the format of log files. Currently, only the binary format is supported. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +**Default value**: **binary** + +## audit\_rotation\_interval + +**Parameter description**: Specifies the interval of creating an audit log file. If the difference between the current time and the time when the previous audit log file is created is greater than the value of **audit\_rotation\_interval**, a new audit log file will be generated. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1 to _INT\_MAX_/60. The unit is min. + +**Default value:** **1d** + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Adjust this parameter only when required. Otherwise, **audit\_resource\_policy** may fail to take effect. 
To control the storage space and time of audit logs, set the [audit\_resource\_policy](#en-us_topic_0237124745_section939915522551), [audit\_space\_limit](#en-us_topic_0237124745_en-us_topic_0059777744_s167d5900250946bca199444c0617c714), and [audit\_file\_remain\_time](#en-us_topic_0237124745_section149961828185211) parameters. + +## audit\_rotation\_size + +**Parameter description**: Specifies the maximum capacity of an audit log file. If the total number of messages in an audit log exceeds the value of **audit\_rotation\_size**, the server will generate a new audit log file. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1024 to 1048576. The unit is KB. + +**Default value**: **10 MB** + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Adjust this parameter only when required. Otherwise, **audit\_resource\_policy** may fail to take effect. To control the storage space and time of audit logs, set the **audit\_resource\_policy**, **audit\_space\_limit**, and **audit\_file\_remain\_time** parameters. + +## audit\_resource\_policy + +**Parameter description**: Specifies the policy for determining whether audit logs are preferentially stored by space or time. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that audit logs are preferentially stored by space. A maximum of [audit\_space\_limit](#en-us_topic_0237124745_en-us_topic_0059777744_s167d5900250946bca199444c0617c714) logs can be stored. +- **off** indicates that audit logs are preferentially stored by time. A minimum duration of [audit\_file\_remain\_time](#en-us_topic_0237124745_section149961828185211) logs must be stored. + +**Default value**: **on** + +## audit\_file\_remain\_time + +**Parameter description**: Specifies the minimum duration required for recording audit logs. This parameter is valid only when [audit\_resource\_policy](#en-us_topic_0237124745_section939915522551) is set to **off**. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 730. The unit is day. **0** indicates that the storage duration is not limited. + +**Default value**: **90** + +## audit\_space\_limit + +**Parameter description**: Specifies the total disk space occupied by audit files. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1024 KB to 1024 GB. The unit is KB. + +**Default value**: **1GB** + +## audit\_file\_remain\_threshold + +**Parameter description**: Specifies the maximum number of audit files in the audit directory. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). 
+ +**Value range**: an integer ranging from 1 to 1048576 + +**Default value**: **1048576** + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Ensure that this parameter is set to **1048576**. Adjust this parameter only when required. Otherwise, **audit\_resource\_policy** may fail to take effect. To control the storage space and time of audit logs, set the **audit\_resource\_policy**, **audit\_space\_limit**, and **audit\_file\_remain\_time** parameters. + diff --git a/content/en/docs/Developerguide/auditing.md b/content/en/docs/Developerguide/auditing.md new file mode 100644 index 000000000..c6c789964 --- /dev/null +++ b/content/en/docs/Developerguide/auditing.md @@ -0,0 +1,9 @@ +# Auditing + +- **[Audit Switch](audit-switch.md)** + +- **[User and Permission Audit](user-and-permission-audit.md)** + +- **[Operation Auditing](operation-auditing.md)** + + diff --git a/content/en/docs/Developerguide/automatic-vacuuming.md b/content/en/docs/Developerguide/automatic-vacuuming.md new file mode 100644 index 000000000..3d6a900c5 --- /dev/null +++ b/content/en/docs/Developerguide/automatic-vacuuming.md @@ -0,0 +1,192 @@ +# Automatic Vacuuming + +The autovacuum process automatically runs the **VACUUM** and **ANALYZE** statements to reclaim the record space marked as deleted and update statistics about the table. + +## autovacuum + +**Parameter description**: Specifies whether to enable the autovacuum process in the database. Ensure that the [track\_counts](query-and-index-statistics-collector.md#en-us_topic_0237124727_en-us_topic_0059779313_s3f4fb0b1004041f69e1454c701952411) parameter is set to **on** before enabling the automatic cleanup process. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- Set the **autovacuum** parameter to **on** if you want to enable the automatic cleanup of abnormal two-phase transactions when the system recovers from faults. +>- If **autovacuum** is set to **on** and **[autovacuum\_max\_workers](#en-us_topic_0237124730_en-us_topic_0059778244_s76932f79410248ba8923017d19982673)** to **0**, the autovacuum process is enabled only when the system recovers from faults to clean up abnormal two-phase transactions. +>- If **autovacuum** is set to **on** and **[autovacuum\_max\_workers](#en-us_topic_0237124730_en-us_topic_0059778244_s76932f79410248ba8923017d19982673)** to a value greater than **0**, the autovacuum process is enabled to clean up bloated tables during daily operations and abnormal two-phase transactions when the system recovers from faults. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Even if the **autovacuum** parameter is set to **off**, the autovacuum process will be enabled automatically when a transaction ID wraparound is about to occur. When a CREATE DATABASE or DROP DATABASE operation fails, it is possible that the transaction has been committed or rolled back on some nodes whereas some nodes are still in the prepared status. In this case, manual operation as follows is required to restore the nodes: +>1. Use the gs\_clean tool \(setting the **option** parameter to -N\) to query the xid of the abnormal two-phase transaction and nodes in the prepared status. +>2. Log in to the nodes in the prepared status. Administrators connect to an available database such as **postgres** and run the **set xc\_maintenance\_mode = on** statement. 
+>3. Commit or roll back the two-phase transaction based on the global transaction status. + +**Value range**: Boolean + +- **on** indicates that the autovacuum process is enabled. +- **off** indicates that the autovacuum process is disabled. + +**Default value**: **on** + +## autovacuum\_mode + +**Parameter description**: Specifies whether the autoanalyze or autovacuum function is enabled. This parameter is valid only when **autovacuum** is set to **on**. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Valid value**: enumerated values + +- **analyze** indicates that only autoanalyze is performed. +- **vacuum** indicates that only autovacuum is performed. +- **mix** indicates that both autoanalyze and autovacuum are performed. +- **none** indicates that neither of them is performed. + +**Default value**: **mix** + +## autoanalyze\_timeout + +**Parameter description**: Specifies the timeout period of autoanalyze. If the duration of autoanalyze on a table exceeds the value of **autoanalyze\_timeout**, the autoanalyze is automatically canceled. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 0 to 2147483. The unit is s. + +**Default value**: **5min** \(300s\) + +## autovacuum\_io\_limits + +**Parameter description**: Specifies the upper limit of I/Os triggered by the autovacuum process per second. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer. The value can be **–1** or a number ranging from 0 to 1073741823. **–1** indicates that the default cgroup is used. + +**Default value**: **–1** + +## log\_autovacuum\_min\_duration + +**Parameter description**: Records each step performed by the autovacuum process to the server log when the execution time of the autovacuum process is greater than or equal to a certain value. This parameter helps track the autovacuum behavior. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +A setting example is as follows: + +Set the **log\_autovacuum\_min\_duration** parameter to 250 ms to record the actions of autovacuum if it runs for 250 ms or longer. + +**Value range**: an integer ranging from –1 to 2147483647. The unit is ms. + +- **0** indicates that all autovacuum actions are recorded in the log. +- **–1** indicates that all autovacuum actions are not recorded in the log. +- A value other than **–1** indicates that a message is recorded when an autovacuum action is skipped due to a lock conflict. + +**Default value**: **–1** + +## autovacuum\_max\_workers + +**Parameter description**: Specifies the maximum number of autovacuum worker threads that can run at the same time. The upper limit of this parameter is related to the values of **max\_connections** and **job\_queue\_processes**. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). 
+ +**Value range**: an integer. The minimum value is **0**, indicating that autovacuum is not enabled. The theoretical maximum value is **262143**, but the actual maximum value is a dynamic value calculated by the following formula: 262143 - **max\_connections** - **job\_queue\_processes** - Number of auxiliary threads - Number of autovacuum launcher threads - 1. The number of auxiliary threads and the number of autovacuum launcher threads are specified by two macros. Their default values are **20** and **2** respectively. + +**Default value**: **3** + +## autovacuum\_naptime + +**Parameter description**: Specifies the interval between activity rounds for the autovacuum process. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 1 to 2147483. The unit is s. + +**Default value**: **10min** \(600s\) + +## autovacuum\_vacuum\_threshold + +**Parameter description**: Specifies the threshold for triggering the **VACUUM** operation. When the number of deleted or updated records in a table exceeds the specified threshold, the **VACUUM** operation is executed on this table. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 0 to 2147483647. + +**Default value**: **50** + +## autovacuum\_analyze\_threshold + +**Parameter description**: Specifies the threshold for triggering the **ANALYZE** operation. When the number of deleted, inserted, or updated records in a table exceeds the specified threshold, the **ANALYZE** operation is executed on this table. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 0 to 2147483647 + +**Default value**: **50** + +## autovacuum\_vacuum\_scale\_factor + +**Parameter description**: Specifies a fraction of the table size added to the **autovacuum\_vacuum\_threshold** parameter when deciding whether to vacuum a table. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: a floating point number ranging from 0.0 to 100.0 + +**Default value**: **0.2** + +## autovacuum\_analyze\_scale\_factor + +**Parameter description**: Specifies a fraction of the table size added to the **autovacuum\_analyze\_threshold** parameter when deciding whether to analyze a table. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: a floating point number ranging from 0.0 to 100.0 + +**Default value**: **0.1** + +## autovacuum\_freeze\_max\_age + +**Parameter description**: Specifies the maximum age \(in transactions\) that a table's **pg\_class.relfrozenxid** field can attain before a VACUUM operation is performed. + +- The old files under the subdirectory of **pg\_clog/** can also be deleted by the **VACUUM** operation. 
+- Even if the autovacuum process is disabled, the system will invoke the process to prevent transaction ID wraparound. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer from 100000 to 576460752303423487 + +**Default value**: **20000000000** + +## autovacuum\_vacuum\_cost\_delay + +**Parameter description**: Specifies the value of the cost delay used in the autovacuum operation. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from –1 to 100. The unit is ms. **–1** indicates that the normal vacuum cost delay is used. + +**Default value**: **20ms** + +## autovacuum\_vacuum\_cost\_limit + +**Parameter description**: Specifies the value of the cost limit used in the autovacuum operation. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from –1 to 10000 **–1** indicates that the normal vacuum cost limit is used. + +**Default value**: **–1** + +## twophase\_clean\_workers + +**Parameter description**: Specifies the maximum number of concurrent cleanup operations that can be performed by the gs\_clean tool. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 1 to 10 + +**Default value**: **3** + +## defer\_csn\_cleanup\_time + +**Parameter description**: Specifies the interval of recycling transaction IDs. + +**Value range**: an integer ranging from 0 to _INT\_MAX_. The unit is ms. + +**Default value**: **5s** \(5000 ms\) + diff --git a/content/en/docs/Developerguide/background-writer.md b/content/en/docs/Developerguide/background-writer.md new file mode 100644 index 000000000..3579c54cb --- /dev/null +++ b/content/en/docs/Developerguide/background-writer.md @@ -0,0 +1,49 @@ +# Background Writer + +This section describes background writer parameters. The background writer process is used to write dirty data \(new or modified data\) in shared buffers to disks. This mechanism ensures that database processes seldom or never need to wait for a write action to occur when handling user queries. + +It also mitigates performance deterioration caused by checkpoints because only a few of dirty pages need to be flushed to the disk when the checkpoints arrive. This mechanism, however, increases the overall net I/O load because while a repeatedly-dirtied page may otherwise be written only once per checkpoint interval, the background writer may write it several times as it is dirtied in the same interval. In most cases, continuous light loads are preferred, instead of periodical load peaks. The parameters discussed in this section can be set based on actual requirements. + +## bgwriter\_delay + +**Parameter description**: Specifies the interval at which the background writer writes dirty shared buffers. 
The background writer initiates write operations for some dirty shared buffers \(the volume of data to be written is specified by the **bgwriter\_lru\_maxpages** parameter\), sleep for the milliseconds specified by **bgwriter\_delay**, and then restarts. + +In many systems, the effective resolution of sleep delays is 10 milliseconds. Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 10 to 10000. The unit is millisecond. + +**Default value**: **10s** + +**Setting suggestion:** Reduce this value in slow data writing scenarios to reduce the checkpoint load. + +## bgwriter\_lru\_maxpages + +**Parameter description**: Specifies the number of dirty buffers the background writer can write in each round. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 1000 + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>When this parameter is set to **0**, the background writer is disabled. This setting does not affect checkpoints. + +**Default value**: **100** + +## bgwriter\_lru\_multiplier + +**Parameter description**: Specifies the coefficient used to estimate the number of dirty buffers the background writer can write in the next round. + +The number of dirty buffers written in each round depends on the number of buffers used by server processes during recent rounds. The estimated number of buffers required in the next round is calculated using the following formula: Average number of recently used buffers x **bgwriter\_lru\_multiplier**. The background writer writes dirty buffers until sufficient clean and reusable buffers are available. The number of buffers the background writer writes in each round is always equal to or less than **bgwriter\_lru\_maxpages**. + +Therefore, the value **1.0** represents a just-in-time policy of writing exactly the number of dirty buffers predicted to be required. Larger values provide some cushion against spikes in demand, whereas smaller values intentionally leave more writes to be done by server processes. + +Smaller values of **bgwriter\_lru\_maxpages** and **bgwriter\_lru\_multiplier** reduce the extra I/O load caused by the background writer, but make it more likely that server processes will have to issue writes for themselves, delaying interactive queries. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a floating point number ranging from 0 to 10 + +**Default value:** **2** + diff --git a/content/en/docs/Developerguide/backup-and-restoration-control-functions.md b/content/en/docs/Developerguide/backup-and-restoration-control-functions.md new file mode 100644 index 000000000..b9cd5114a --- /dev/null +++ b/content/en/docs/Developerguide/backup-and-restoration-control-functions.md @@ -0,0 +1,205 @@ +# Backup and Restoration Control Functions + +## Backup Control Functions + +Backup control functions help with online backup. 
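+
+For example, a minimal online backup session brackets an external copy of the data directory between the **pg\_start\_backup** and **pg\_stop\_backup** calls described below. The backup label and the transaction log locations in this sketch are illustrative only:
+
+```
+postgres=# SELECT pg_start_backup('nightly_backup');
+ pg_start_backup
+-----------------
+ 0/4000020
+(1 row)
+
+postgres=# -- Copy the data directory with an external tool while the backup is in progress.
+
+postgres=# SELECT pg_stop_backup();
+NOTICE: pg_stop_backup cleanup done, waiting for required WAL segments to be archived
+NOTICE: pg_stop_backup complete, all required WAL segments have been archived
+ pg_stop_backup
+----------------
+ 0/40000D8
+(1 row)
+```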
+ +- pg\_create\_restore\_point\(name text\) + + Description: Creates a named point for performing the restore operation \(restricted to system administrators\). + + Return type: text + + Note: **pg\_create\_restore\_point** creates a named transaction log record that can be used as a restoration target, and returns the corresponding transaction log location. The given name can then be used with **recovery\_target\_name** to specify the point up to which restoration will proceed. Avoid creating multiple restoration points with the same name, since restoration will stop at the first one whose name matches the restoration target. + +- pg\_current\_xlog\_location\(\) + + Description: Obtains the write position of the current transaction log. + + Return type: text + + Note: **pg\_current\_xlog\_location** displays the write position of the current transaction log in the same format as those of the previous functions. Read-only operations do not require rights of the system administrator. + +- pg\_current\_xlog\_insert\_location\(\) + + Description: Obtains the insert position of the current transaction log. + + Return type: text + + Note: **pg\_current\_xlog\_insert\_location** displays the insert position of the current transaction log. The insertion point is the logical end of the transaction log at any instant, while the write location is the end of what has been written out from the server's internal buffers. The write position is the end that can be detected externally from the server. This operation can be performed to archive only some of completed transaction log files. The insert position is used for commissioning the server. Read-only operations do not require rights of the system administrator. + +- pg\_start\_backup\(label text \[, fast boolean \]\) + + Description: Starts executing online backup \(restricted to system administrators or replication roles\). + + Return type: text + + Note: **pg\_start\_backup** receives a user-defined backup label \(usually the name of the position where the backup dump file is stored\). This function writes a backup label file to the data directory of openGauss and then returns the starting position of backed up transaction logs in text mode. + + ``` + postgres=# SELECT pg_start_backup('label_goes_here'); + pg_start_backup + ----------------- + 0/3000020 + (1 row) + ``` + +- pg\_stop\_backup\(\) + + Description: Completes online backup \(restricted to system administrators or replication roles\). + + Return type: text + + Note: **pg\_stop\_backup** deletes the label file created by **pg\_start\_backup** and creates a backup history file in the transaction log archive area. The history file includes the label given to **pg\_start\_backup**, the starting and ending transaction log locations for the backup, and the starting and ending times of the backup. The return value is the backup's ending transaction log location. After the ending position is calculated, the insert position of the current transaction log automatically goes ahead to the next transaction log file. This way, the ended transaction log file can be immediately archived so that backup is complete. + +- pg\_switch\_xlog\(\) + + Description: Switches to a new transaction log file \(restricted to system administrators\). + + Return type: text + + Note: **pg\_switch\_xlog** moves to the next transaction log file so that the current log file can be archived \(if continuous archive is used\). 
The return value is the ending transaction log location + 1 within the just-completed transaction log file. If there has been no transaction log activity since the last transaction log switchover, **pg\_switch\_xlog** will do nothing but return the start location of the transaction log file currently in use. + +- pg\_xlogfile\_name\(location text\) + + Description: Converts the position string in a transaction log to a file name. + + Return type: text + + Note: **pg\_xlogfile\_name** extracts only the transaction log file name. If the given transaction log position is the transaction log file border, a transaction log file name will be returned for both the two functions. This is usually the desired behavior for managing transaction log archiving, since the preceding file is the last one that currently needs to be archived. + +- pg\_xlogfile\_name\_offset\(location text\) + + Description: Converts the position string in a transaction log to a file name and returns the byte offset in the file. + + Return type: text, integer + + Note: **pg\_xlogfile\_name\_offset** can extract transaction log file names and byte offsets from the returned results of the preceding functions. Example: + + ``` + postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); + NOTICE: pg_stop_backup cleanup done, waiting for required WAL segments to be archived + NOTICE: pg_stop_backup complete, all required WAL segments have been archived + file_name | file_offset + --------------------------+------------- + 000000010000000000000003 | 272 + (1 row) + ``` + +- pg\_xlog\_location\_diff\(location text, location text\) + + Description: **pg\_xlog\_location\_diff** calculates the difference in bytes between two transaction log locations. + + Return type: numeric + +- pg\_cbm\_tracked\_location\(\) + + Description: Queries for the LSN location parsed by CBM. + + Return type: text + +- pg\_cbm\_get\_merged\_file\(startLSNArg text, endLSNArg text\) + + Description: Combines CBM files within the specified LSN range into one and returns the name of the combined file. + + Return type: text + +- pg\_cbm\_get\_changed\_block\(startLSNArg text, endLSNArg text\) + + Description: Combines CBM files within the specified LSN range into a table and return records of this table. + + Return type: record + + Note: The table columns include the start LSN, end LSN, tablespace OID, database OID, table relfilenode, table fork number, whether the table is deleted, whether the table is created, whether the table is truncated, number of pages in the truncated table, number of modified pages, and list of No. of modified pages. + +- pg\_cbm\_recycle\_file\(targetLSNArg text\) + + Description: Deletes the CBM files that are no longer used and returns the first LSN after the deletion. + + Return type: text + +- pg\_cbm\_force\_track\(targetLSNArg text,timeOut int\) + + Description: Forcibly executes the CBM trace to the specified Xlog position and returns the Xlog position of the actual trace end point. + + Return type: text + +- pg\_enable\_delay\_ddl\_recycle\(is\_full\_backup boolean, backup\_key text\) + + Description: Enables DDL delay and returns the Xlog position of the enabling point. + + Return type: text + +- pg\_disable\_delay\_ddl\_recycle\(barrierLSNArg text, isForce bool, is\_full\_backup boolean, backup\_key text\) + + Description: Disables DDL delay and returns the Xlog range where DDL delay takes effect. + + Return type: record + +- pg\_enable\_delay\_xlog\_recycle\(\) + + Description: Enables Xlog recycle delay. 
This function is used in primary database node restoration. + + Return type: void + +- pg\_disable\_delay\_xlog\_recycle\(\) + + Description: Disables Xlog recycle delay. This function is used in primary database node restoration. + + Return type: void + + +## Restoration Control Functions + +Restoration control functions provide information about the status of standby nodes. These functions may be executed both during restoration and in normal running. + +- pg\_is\_in\_recovery\(\) + + Description: Returns **true** if restoration is still in progress. + + Return type: bool + +- pg\_last\_xlog\_receive\_location\(\) + + Description: Gets the last transaction log location received and synchronized to disk by streaming replication. While streaming replication is in progress, this will increase monotonically. If restoration has completed, this value will remain static at the value of the last WAL record received and synchronized to disk during restoration. If streaming replication is disabled or if it has not yet started, the function returns **NULL**. + + Return type: text + +- pg\_last\_xlog\_replay\_location\(\) + + Description: Gets last transaction log location replayed during restoration. If restoration is still in progress, this will increase monotonically. If restoration has completed, then this value will remain static at the value of the last WAL record received during that restoration. When the server has been started normally without restoration, the function returns **NULL**. + + Return type: text + +- pg\_last\_xact\_replay\_timestamp\(\) + + Description: Gets the timestamp of last transaction replayed during restoration. This is the time to commit a transaction or abort a WAL record on the primary node. If no transactions have been replayed during restoration, this function will return **NULL**. If restoration is still in progress, this will increase monotonically. If restoration has completed, then this value will remain static at the value of the last WAL record received during that restoration. If the server normally starts without manual intervention, this function will return **NULL**. + + Return type: timestamp with time zone + + +Restoration control functions control restoration processes. These functions may be executed only during restoration. + +- pg\_is\_xlog\_replay\_paused\(\) + + Description: Returns **true** if restoration is paused. + + Return type: bool + +- pg\_xlog\_replay\_pause\(\) + + Description: Pauses restoration immediately. + + Return type: void + +- pg\_xlog\_replay\_resume\(\) + + Description: Restarts restoration if it was paused. + + Return type: void + + +While restoration is paused, no further database changes are applied. In hot standby mode, all new queries will see the same consistent snapshot of the database, and no further query conflicts will be generated until restoration is resumed. + +If streaming replication is disabled, the paused state may continue indefinitely without problem. While streaming replication is in progress, WAL records will continue to be received, which will eventually fill available disk space. This progress depends on the duration of the pause, the rate of WAL generation, and available disk space. 
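+
+For example, the following sketch pauses replay on a hot standby, checks the replay status, and then resumes replay. The locations shown are illustrative, and these statements may be executed only during restoration:
+
+```
+postgres=# SELECT pg_xlog_replay_pause();
+ pg_xlog_replay_pause
+----------------------
+
+(1 row)
+
+postgres=# SELECT pg_is_xlog_replay_paused(), pg_last_xlog_replay_location();
+ pg_is_xlog_replay_paused | pg_last_xlog_replay_location
+--------------------------+------------------------------
+ t                        | 0/5000140
+(1 row)
+
+postgres=# SELECT pg_xlog_replay_resume();
+ pg_xlog_replay_resume
+-----------------------
+
+(1 row)
+```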
+ diff --git a/content/en/docs/Developerguide/basic-statements.md b/content/en/docs/Developerguide/basic-statements.md new file mode 100644 index 000000000..9f978d08c --- /dev/null +++ b/content/en/docs/Developerguide/basic-statements.md @@ -0,0 +1,14 @@ +# Basic Statements + +During PL/SQL programming, you may define some variables, assign values to variables, and call other stored procedures. This chapter describes basic PL/SQL statements, including variable definition statements, value assignment statements, call statements, and return statements. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>You are advised not to call the SQL statements containing passwords in the stored procedures because authorized users may view the stored procedure file in the database and password information is leaked. If a stored procedure contains other sensitive information, permission to access this procedure must be configured, preventing information leakage. + +- **[Define Variable](define-variable.md)** + +- **[Assignment Statements](assignment-statements.md)** + +- **[Call Statement](call-statement.md)** + + diff --git a/content/en/docs/Developerguide/basic-structure.md b/content/en/docs/Developerguide/basic-structure.md new file mode 100644 index 000000000..b792f7868 --- /dev/null +++ b/content/en/docs/Developerguide/basic-structure.md @@ -0,0 +1,47 @@ +# Basic Structure + +## Structure + +A PL/SQL block can contain a sub-block which can be placed in any section. The following describes the architecture of a PL/SQL block: + +- **DECLARE**: declares variables, types, cursors, and regional stored procedures and functions used in the PL/SQL block. + + ``` + DECLARE + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >This part is optional if no variable needs to be declared. + >- An anonymous block may omit the **DECLARE** keyword if no variable needs to be declared. + >- For a stored procedure, **AS** is used, which is equivalent to **DECLARE**. The **AS** keyword must be reserved even if there is no variable declaration part. + +- **EXECUTION**: specifies procedure and SQL statements. It is the main part of a program. Mandatory. + + ``` + BEGIN + ``` + +- Exception part: processes errors. Optional. + + ``` + EXCEPTION + ``` + +- End + + ``` + END; + / + ``` + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >You are not allowed to use consecutive tabs in the PL/SQL block because they may result in an exception when the **gsql** tool is executed with the **-r** parameter specified. + + +## Category + +PL/SQL blocks are classified into the following types: + +- Anonymous block: a dynamic block that can be executed only for once. For details about the syntax, see [Figure 1](anonymous-blocks.md#en-us_topic_0237122218_en-us_topic_0059779171_f19ed9f384e0646f29744951d7eec8c3b). +- Subprogram: a stored procedure, function, operator, or packages stored in a database. A subprogram created in a database can be called by other programs. + diff --git a/content/en/docs/Developerguide/basic-text-matching.md b/content/en/docs/Developerguide/basic-text-matching.md new file mode 100644 index 000000000..18120c6e4 --- /dev/null +++ b/content/en/docs/Developerguide/basic-text-matching.md @@ -0,0 +1,53 @@ +# Basic Text Matching + +Full text search in openGauss is based on the match operator **@@**, which returns **true** if a **tsvector** \(document\) matches a **tsquery** \(query\). 
It does not matter which data type is written first: + +``` +postgres=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery AS RESULT; + result +---------- + t +(1 row) +``` + +``` +postgres=# SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector AS RESULT; + result +---------- + f +(1 row) +``` + +As the above example suggests, a **tsquery** is not raw text, any more than a **tsvector** is. A tsquery contains search terms, which must be already-normalized lexemes, and may combine multiple terms using **AND**, **OR**, and **NOT** operators. For details, see [Text Search Types](text-search-types.md). There are functions **to\_tsquery** and **plainto\_tsquery** that are helpful in converting user-written text into a proper tsquery, for example by normalizing words appearing in the text. Similarly, **to\_tsvector** is used to parse and normalize a document string. So in practice a text search match would look more like this: + +``` +postgres=# SELECT to_tsvector('fat cats ate fat rats') @@ to_tsquery('fat & rat') AS RESULT; +result +---------- + t +(1 row) +``` + +Observe that this match would not succeed if written as follows: + +``` +postgres=# SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat')AS RESULT; +result +---------- + f +(1 row) +``` + +In the preceding match, no normalization of the word **rats** will occur. Therefore, **rats** does not match **rat**. + +The **@@** operator also supports text input, allowing explicit conversion of a text string to **tsvector** or **tsquery** to be skipped in simple cases. The variants available are: + +``` +tsvector @@ tsquery +tsquery @@ tsvector +text @@ tsquery +text @@ text +``` + +We already saw the first two of these. The form **text @@ tsquery** is equivalent to **to\_tsvector\(text\) @@ tsquery**. The form **text @@ text** is equivalent to **to\_tsvector\(text\) @@ plainto\_tsquery\(text\)**. + diff --git a/content/en/docs/Developerguide/before-you-start.md b/content/en/docs/Developerguide/before-you-start.md new file mode 100644 index 000000000..0ae08b173 --- /dev/null +++ b/content/en/docs/Developerguide/before-you-start.md @@ -0,0 +1,146 @@ +# Before You Start + +This section explains how to use databases, including creating databases and tables, inserting data to tables, and querying data in tables. + +## Prerequisites + +openGauss is running properly. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. + + If you are not sure which server the primary node of the database is deployed on, see [Confirming Connection Information](confirming-connection-information.md). + +2. Connect to a database. + + ``` + gsql -d postgres -p 8000 + ``` + + If the following information is displayed, the connection has been established: + + ``` + gsql ((openGauss 1.0.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + postgres=# + ``` + + **postgres** is the database generated by default after openGauss installation is complete. You can connect to this database to create a database. **8000** is the port number of the database primary node, and you can change it as needed. You can obtain the port number by following the instructions provided in [Confirming Connection Information](confirming-connection-information.md). 
+ + **Note:** + + - You need to use a client program or tool to connect to the database and to deliver SQL statements + - **gsql** is a command-line interface \(CLI\) tool provided for connecting to a database. For more database connection methods, see [Connecting to a Database](connecting-to-a-database.md). + +3. Create a database user. + + Only administrators that are created during openGauss installation can access the initial database by default. You can also create other database users. + + ``` + postgres=# CREATE USER joe WITH PASSWORD "Bigdata@123"; + ``` + + If the following information is displayed, the user has been created: + + ``` + CREATE ROLE + ``` + + In this case, you have created a user named **joe**, and the user password is **Bigdata@123**. + + **Note**: For details about how to create users, see [Managing Users and Their Permissions](managing-users-and-their-permissions.md). + +4. Create a database. + + ``` + postgres=# CREATE DATABASE db_tpcc OWNER joe; + ``` + + If the following information is displayed, the database has been created: + + ``` + CREATE DATABASE + ``` + + After creating the **db\_tpcc** database, you can run the following command to exit the **postgres** database and log in to the **db\_tpcc** database as the user you created for more operations. You can also continue using the default **postgres** database. + + ``` + postgres=# \q + gsql -d db_tpcc -p 8000 -U joe -W Bigdata@123 + gsql ((openGauss 1.0.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + db_tpcc=> + ``` + + Create a schema. + + ``` + db_tpcc=> CREATE SCHEMA joe AUTHORIZATION joe; + ``` + + If the following information is displayed, the schema has been created: + + ``` + CREATE SCHEMA + ``` + + **Note:** + + New databases are created in the **pg\_default** tablespace by default. To specify another tablespace, run the following statement: + + ``` + postgres=# CREATE DATABASE db_tpcc WITH TABLESPACE = hr_local; + CREATE DATABASE + ``` + + _hr\_local_ indicates the tablespace name. For details about how to create a tablespace, see [Creating and Managing Tablespaces](creating-and-managing-tablespaces.md). + +5. Create a table. + + - Create a table named **mytable** that has only one column. The column name is **firstcol** and the column type is **integer**. + + ``` + db_tpcc=> CREATE TABLE mytable (firstcol int); + ``` + + ``` + CREATE TABLE + ``` + + - Run the following command to insert data to the table: + + ``` + db_tpcc=> INSERT INTO mytable values (100); + ``` + + If the following information is displayed, the data has been inserted: + + ``` + INSERT 0 1 + ``` + + - Run the following command to view data in the table: + + ``` + db_tpcc=> SELECT * from mytable; + firstcol + ---------- + 100 + (1 row) + ``` + + **Note:** + + - By default, new database objects, such as the **mytable** table, are created in the _$user_ schema. For more details about schemas, see [Creating and Managing Schemas](creating-and-managing-schemas.md). + - For more details about how to create a table, see [Creating and Managing Tables](creating-and-managing-tables.md). + - In addition to the created tables, a database contains many system catalogs. These system catalogs contain openGauss installation information and information about various queries and processes in openGauss. You can collect information about the database by querying system catalogs. 
For details, see [Querying System Catalogs](querying-system-catalogs.md).

    openGauss supports row and column storage, providing high query performance for interaction analysis in complex scenarios. For details about how to select a storage model, see [Planning a Storage Model](planning-a-storage-model.md).


diff --git a/content/en/docs/Developerguide/begin.md b/content/en/docs/Developerguide/begin.md
new file mode 100644
index 000000000..8fa0a474e
--- /dev/null
+++ b/content/en/docs/Developerguide/begin.md
@@ -0,0 +1,58 @@
# BEGIN

## Function

**BEGIN** may be used to initiate an anonymous block or a single transaction. This section describes the syntax of **BEGIN** used to initiate an anonymous block. For details about the **BEGIN** syntax that initiates transactions, see [START TRANSACTION](start-transaction.md).

An anonymous block is a structure that can dynamically create and execute stored procedure code, instead of permanently storing the code as a database object in the database.

## Precautions

None

## Syntax

- Start an anonymous block.

    ```
    [DECLARE [declare_statements]]
    BEGIN
    execution_statements
    END;
    /
    ```

- Start a transaction.

    ```
    BEGIN [ WORK | TRANSACTION ]
      [
        {
           ISOLATION LEVEL { READ COMMITTED | SERIALIZABLE | REPEATABLE READ }
           | { READ WRITE | READ ONLY }
        } [, ...]
      ];
    ```


## Parameter Description

- **declare\_statements**

    Declares a variable, including its name and type, for example, **sales\_cnt int**.

- **execution\_statements**

    Specifies the statements to be executed in an anonymous block.

    Value range: an existing function name

## Examples

None

## Helpful Links

[START TRANSACTION](start-transaction.md)

diff --git a/content/en/docs/Developerguide/benchmarksql-an-open-source-tpc-c-tool.md b/content/en/docs/Developerguide/benchmarksql-an-open-source-tpc-c-tool.md
new file mode 100644
index 000000000..8d2ab732c
--- /dev/null
+++ b/content/en/docs/Developerguide/benchmarksql-an-open-source-tpc-c-tool.md
@@ -0,0 +1,14 @@
# BenchmarkSQL – An Open-Source TPC-C Tool

For example, **BenchmarkSQL** can be used to run a TPC-C test, as follows:

- Download **benchmarksql** from the following link: [https://osdn.net/frs/g\_redir.php?m=kent&f=benchmarksql%2Fbenchmarksql-5.0.zip](https://osdn.net/frs/g_redir.php?m=kent&f=benchmarksql%2Fbenchmarksql-5.0.zip).
- Under **run/sql.common**, adjust the schema creation scripts to MOT syntax and avoid unsupported DDLs.
- The following attachment is a sql.common folder that has already been adjusted for MOT. Replace the contents of the sql.common folder with the contents of the attachment and execute the scripts.

![](figures/en-us_image_0257713450.png)

This folder includes all the CREATE TABLE and CREATE INDEX statements in MOT syntax. The only difference is in the table DDL: create FOREIGN table and drop FOREIGN table carry an additional FOREIGN keyword that marks the table as an MOT table (see the sketch after the note below). If the embedded file cannot be downloaded, the required change should be added to the relevant files in the sql.common folder before they are executed.

**Note** – The benchmark test is executed using a standard interactive SQL mode without stored procedures.
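To illustrate the adjustment described above, the following sketch rewrites one disk-based DDL statement in MOT form. It is only an illustration: the **bmsql_config** table and its columns stand in for whichever table is being converted, and the authoritative definitions remain the ones shipped in the **run/sql.common** scripts.

```
-- Disk-based DDL as generated by BenchmarkSQL (for comparison):
-- CREATE TABLE bmsql_config (cfg_name VARCHAR(30), cfg_value VARCHAR(50));
-- DROP TABLE bmsql_config;

-- Adjusted for MOT: the additional FOREIGN keyword marks the table as an MOT table.
CREATE FOREIGN TABLE bmsql_config
(
    cfg_name  VARCHAR(30),
    cfg_value VARCHAR(50)
);

-- The matching drop statement carries the FOREIGN keyword as well.
DROP FOREIGN TABLE bmsql_config;
```

The same pattern applies to each CREATE TABLE and DROP TABLE pair in the schema scripts; as noted above, only the table DDL differs, so the index creation statements are left unchanged.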
+ diff --git a/content/en/docs/Developerguide/best-practices.md b/content/en/docs/Developerguide/best-practices.md new file mode 100644 index 000000000..093d57ec3 --- /dev/null +++ b/content/en/docs/Developerguide/best-practices.md @@ -0,0 +1,79 @@ +# Best Practices + +For details about the parameters, see [GS\_OPT\_MODEL](gs_opt_model.md). + +**Table 1** + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Model Parameter | Recommended Value |
| :--- | :--- |
| template_name | rlstm |
| model_name | The value can be customized, for example, open_ai. The value must meet the unique constraint. |
| datname | Name of the database to be served, for example, postgres. |
| ip | IP address of the AI Engine, for example, 127.0.0.1. |
| port | AI Engine listening port number, for example, 5000. |
| max_epoch | Number of iterations. A large value is recommended to ensure convergence, for example, 2000. |
| learning_rate | A floating-point number in the range (0, 1]. A large learning rate is recommended to accelerate convergence. |
| dim_red | Number of feature values to be reduced.<br>-1: PCA dimension reduction is not used and all features are retained.<br>A floating-point number in the range (0, 1]: a smaller value means a smaller training dimension and faster convergence, but lowers the training accuracy. |
| hidden_units | If the feature dimension is high, increase this value to increase the model complexity, for example, 64, 128, and so on. |
| batch_size | Increase this value based on the amount of encoded data to accelerate model convergence, for example, 256, 512, and so on. |
| Other parameters | See GS_OPT_MODEL. |
+ +Recommended parameter settings: + +``` +INSERT INTO gs_opt_model values('rlstm', 'open_ai', 'postgres', '127.0.0.1', 5000, 2000, 1, -1, 64, 512, 0, false, false, '{S, T}', '{0,0}', '{0,0}', 'Text'); +``` + diff --git a/content/en/docs/Developerguide/bgwriter_stat.md b/content/en/docs/Developerguide/bgwriter_stat.md new file mode 100644 index 000000000..6072d9e57 --- /dev/null +++ b/content/en/docs/Developerguide/bgwriter_stat.md @@ -0,0 +1,95 @@ +# BGWRITER\_STAT + +**BGWRITER\_STAT** displays statistics about the background writer process's activities. + +**Table 1** BGWRITER\_STAT columns + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Name | Type | Description |
| :--- | :--- | :--- |
| checkpoints_timed | bigint | Number of scheduled checkpoints that have been performed |
| checkpoints_req | bigint | Number of requested checkpoints that have been performed |
| checkpoint_write_time | double precision | Total time that has been spent in the portion of checkpoint processing where files are written to disk (unit: ms) |
| checkpoint_sync_time | double precision | Total time that has been spent in the portion of checkpoint processing where files are synchronized to disk (unit: ms) |
| buffers_checkpoint | bigint | Number of buffers written during checkpoints |
| buffers_clean | bigint | Number of buffers written by the background writer |
| maxwritten_clean | bigint | Number of times the background writer stopped a cleaning scan because it had written too many buffers |
| buffers_backend | bigint | Number of buffers written directly by a backend |
| buffers_backend_fsync | bigint | Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write) |
| buffers_alloc | bigint | Number of buffers allocated |
| stats_reset | timestamp with time zone | Time at which these statistics were last reset |
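The counters above can be read with an ordinary query. The following minimal sketch assumes the view is accessed through the **DBE_PERF** schema; adjust the schema qualification to match your deployment, and note that the returned values depend on the instance.

```
-- Checkpoint activity and buffer-writing counters; values depend on the instance.
postgres=# SELECT checkpoints_timed, checkpoints_req, buffers_checkpoint, buffers_clean, buffers_backend FROM dbe_perf.bgwriter_stat;
```

In general, a large number of requested checkpoints relative to scheduled ones, or a steadily growing buffers_backend value, suggests that the checkpoint or background writer settings may need tuning.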
+ diff --git a/content/en/docs/Developerguide/binary-data-types.md b/content/en/docs/Developerguide/binary-data-types.md new file mode 100644 index 000000000..d8f170b43 --- /dev/null +++ b/content/en/docs/Developerguide/binary-data-types.md @@ -0,0 +1,74 @@ +# Binary Data Types + +[Table 1](#en-us_topic_0237121951_en-us_topic_0059778141_t910f42f45b374d94afe2798c42fc5ef6) lists the binary data types supported by openGauss. + +**Table 1** Binary data types + + + + + + + + + + + + + + + + + + + + +

| Name | Description | Storage Space |
| :--- | :--- | :--- |
| BLOB | Binary large object.<br>NOTE: Column storage cannot be used for the BLOB type. | The maximum size is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. |
| RAW | Variable-length hexadecimal string.<br>NOTE: Column storage cannot be used for the RAW type. | 4 bytes plus the actual hexadecimal string. The maximum size is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. |
| BYTEA | Variable-length binary string | 4 bytes plus the actual binary string. The maximum size is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. |
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>In addition to the size limitation on each column, the total size of each tuple is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. + +An example is provided as follows: + +``` +-- Create a table. +postgres=# CREATE TABLE blob_type_t1 +( + BT_COL1 INTEGER, + BT_COL2 BLOB, + BT_COL3 RAW, + BT_COL4 BYTEA +) ; + +-- Insert data. +postgres=# INSERT INTO blob_type_t1 VALUES(10,empty_blob(), +HEXTORAW('DEADBEEF'),E'\\xDEADBEEF'); + +-- Query data in the table. +postgres=# SELECT * FROM blob_type_t1; + bt_col1 | bt_col2 | bt_col3 | bt_col4 +---------+---------+----------+------------ + 10 | | DEADBEEF | \xdeadbeef +(1 row) + +-- Delete the table. +postgres=# DROP TABLE blob_type_t1; +``` + diff --git a/content/en/docs/Developerguide/binary-string-functions-and-operators.md b/content/en/docs/Developerguide/binary-string-functions-and-operators.md new file mode 100644 index 000000000..fd12ec671 --- /dev/null +++ b/content/en/docs/Developerguide/binary-string-functions-and-operators.md @@ -0,0 +1,172 @@ +# Binary String Functions and Operators + +## String Operators + +SQL defines some string functions that use keywords, rather than commas, to separate arguments. + +- octet\_length\(string\) + + Description: Number of bytes in binary string + + Return type: int + + For example: + + ``` + postgres=# SELECT octet_length(E'jo\\000se'::bytea) AS RESULT; + result + -------- + 5 + (1 row) + ``` + +- overlay\(string placing string from int \[for int\]\) + + Description: Replaces substring. + + Return type: bytea + + For example: + + ``` + postgres=# SELECT overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 2 for 3) AS RESULT; + result + ---------------- + \x5402036d6173 + (1 row) + ``` + +- position\(substring in string\) + + Description: Location of specified substring + + Return type: int + + For example: + + ``` + postgres=# SELECT position(E'\\000om'::bytea in E'Th\\000omas'::bytea) AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- substring\(string \[from int\] \[for int\]\) + + Description: Truncates substring. + + Return type: bytea + + For example: + + ``` + postgres=# SELECT substring(E'Th\\000omas'::bytea from 2 for 3) AS RESULT; + result + ---------- + \x68006f + (1 row) + ``` + +- trim\(\[both\] bytes from string\) + + Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. + + Return type: bytea + + For example: + + ``` + postgres=# SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea) AS RESULT; + result + ---------- + \x546f6d + (1 row) + ``` + + +## Other Binary String Functions + +openGauss provides common syntax used for calling functions. + +- btrim\(string bytea,bytes bytea\) + + Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. + + Return type: bytea + + For example: + + ``` + postgres=# SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea) AS RESULT; + result + ------------ + \x7472696d + (1 row) + ``` + +- get\_bit\(string, offset\) + + Description: Extracts bit from string. + + Return type: int + + For example: + + ``` + postgres=# SELECT get_bit(E'Th\\000omas'::bytea, 45) AS RESULT; + result + -------- + 1 + (1 row) + ``` + +- get\_byte\(string, offset\) + + Description: Extracts byte from string. 
    Return type: int

    For example:

    ```
    postgres=# SELECT get_byte(E'Th\\000omas'::bytea, 4) AS RESULT;
     result
    --------
        109
    (1 row)
    ```

- set\_bit\(string,offset, newvalue\)

    Description: Sets bit in string.

    Return type: bytea

    For example:

    ```
    postgres=# SELECT set_bit(E'Th\\000omas'::bytea, 45, 0) AS RESULT;
          result
    ------------------
     \x5468006f6d4173
    (1 row)
    ```

- set\_byte\(string,offset, newvalue\)

    Description: Sets byte in string.

    Return type: bytea

    For example:

    ```
    postgres=# SELECT set_byte(E'Th\\000omas'::bytea, 4, 64) AS RESULT;
          result
    ------------------
     \x5468006f406173
    (1 row)
    ```

diff --git a/content/en/docs/Developerguide/bios-16.md b/content/en/docs/Developerguide/bios-16.md
new file mode 100644
index 000000000..26cc21db2
--- /dev/null
+++ b/content/en/docs/Developerguide/bios-16.md
@@ -0,0 +1,17 @@
# BIOS

Modify the related BIOS settings as follows:

1. Select **BIOS** > **Advanced** > **MISC Config**. Set **Support Smmu** to **Disabled**.
2. Select **BIOS** > **Advanced** > **MISC Config**. Set **CPU Prefetching Configuration** to **Disabled**.

    ![](figures/en-us_image_0257713415.png)

3. Select **BIOS** > **Advanced** > **Memory Config**. Set **Die Interleaving** to **Disabled**.

    ![](figures/en-us_image_0257713417.png)

4. Select **BIOS** > **Advanced** > **Performance Config**. Set **Power Policy** to **Performance**.

![](figures/en-us_image_0257713419.png)

diff --git a/content/en/docs/Developerguide/bios.md b/content/en/docs/Developerguide/bios.md
new file mode 100644
index 000000000..706b3856f
--- /dev/null
+++ b/content/en/docs/Developerguide/bios.md
@@ -0,0 +1,2 @@
# BIOS

diff --git a/content/en/docs/Developerguide/bit-string-functions-and-operators.md b/content/en/docs/Developerguide/bit-string-functions-and-operators.md
new file mode 100644
index 000000000..36bb153e2
--- /dev/null
+++ b/content/en/docs/Developerguide/bit-string-functions-and-operators.md
@@ -0,0 +1,144 @@
# Bit String Functions and Operators

## Bit String Operators

Aside from the usual comparison operators, the following operators can be used. Bit string operands of **&**, **|**, and **\#** must be of equal length. When bit shifting, the original length of the string is preserved by zero padding \(if necessary\).

- ||

    Description: Concatenates bit strings.

    For example:

    ```
    postgres=# SELECT B'10001' || B'011' AS RESULT;
      result
    ----------
     10001011
    (1 row)
    ```

    >![](public_sys-resources/icon-note.gif) **NOTE:**
    >A column can have a maximum of 180 consecutive internal joins. A column with excessive joins will be split into joined consecutive strings.
    >Example: **str1||str2||str3||str4** is split into **\(str1||str2\)||\(str3||str4\)**.

- &

    Description: AND operation between bit strings

    For example:

    ```
    postgres=# SELECT B'10001' & B'01101' AS RESULT;
     result
    --------
     00001
    (1 row)
    ```

- |

    Description: OR operation between bit strings

    For example:

    ```
    postgres=# SELECT B'10001' | B'01101' AS RESULT;
     result
    --------
     11101
    (1 row)
    ```

- \#

    Description: Exclusive OR \(XOR\) operation between bit strings. If the bits at the same position in the two strings are both 1 or both 0, that position returns **0**; if they differ, it returns **1**.
+ + For example: + + ``` + postgres=# SELECT B'10001' # B'01101' AS RESULT; + result + -------- + 11100 + (1 row) + ``` + +- \~ + + Description: NOT operation between bit strings + + For example: + + ``` + postgres=# SELECT ~B'10001'AS RESULT; + result + ---------- + 01110 + (1 row) + ``` + +- << + + Description: Shifts left in a bit string. + + For example: + + ``` + postgres=# SELECT B'10001' << 3 AS RESULT; + result + ---------- + 01000 + (1 row) + ``` + +- \>\> + + Description: Shifts right in a bit string. + + For example: + + ``` + postgres=# SELECT B'10001' >> 2 AS RESULT; + result + ---------- + 00100 + (1 row) + ``` + + +The following SQL-standard functions work on bit strings as well as strings: **length**, **bit\_length**, **octet\_length**, **position**, **substring**, and **overlay**. + +The following functions work on bit strings as well as binary strings: **get\_bit** and **set\_bit**. When working with a bit string, these functions number the first \(leftmost\) bit of the string as bit 0. + +In addition, it is possible to convert between integral values and type **bit**. For example: + +``` +postgres=# SELECT 44::bit(10) AS RESULT; + result +------------ + 0000101100 +(1 row) + +postgres=# SELECT 44::bit(3) AS RESULT; + result +-------- + 100 +(1 row) + +postgres=# SELECT cast(-44 as bit(12)) AS RESULT; + result +-------------- + 111111010100 +(1 row) + +postgres=# SELECT '1110'::bit(4)::integer AS RESULT; + result +-------- + 14 +(1 row) +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>Casting to just "bit" means casting to bit\(1\), and so will deliver only the least significant bit of the integer. + diff --git a/content/en/docs/Developerguide/bit-string-types.md b/content/en/docs/Developerguide/bit-string-types.md new file mode 100644 index 000000000..151c7a4c6 --- /dev/null +++ b/content/en/docs/Developerguide/bit-string-types.md @@ -0,0 +1,44 @@ +# Bit String Types + +Bit strings are strings of 1's and 0's. They can be used to store bit masks. + +openGauss supports two bit string types: bit\(n\) and bit varying\(n\), in which **n** is a positive integer. + +The **bit** type data must match the length **n** exactly. It is an error to attempt to store shorter or longer bit strings. The **bit varying** data is of variable length up to the maximum length **n**; longer strings will be rejected. Writing **bit** without a length is equivalent to **bit\(1\)**, while **bit varying** without a length specification means unlimited length. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If one explicitly casts a bit-string value to **bit\(n\)**, it will be truncated or zero-padded on the right to be exactly **n** bits, without raising an error. +>Similarly, if one explicitly casts a bit-string value to **bit varying\(n\)**, it will be truncated on the right if it is more than **n** bits. + +``` +-- Create a table. +postgres=# CREATE TABLE bit_type_t1 +( + BT_COL1 INTEGER, + BT_COL2 BIT(3), + BT_COL3 BIT VARYING(5) +) ; + +-- Insert data. +postgres=# INSERT INTO bit_type_t1 VALUES(1, B'101', B'00'); + +-- Specify the type length. An error is reported if an inserted string exceeds this length. +postgres=# INSERT INTO bit_type_t1 VALUES(2, B'10', B'101'); +ERROR: bit string length 2 does not match type bit(3) +CONTEXT: referenced column: bt_col2 + +-- Specify the type length. Data is converted if it exceeds this length. +postgres=# INSERT INTO bit_type_t1 VALUES(2, B'10'::bit(3), B'101'); + +-- View data. 
+postgres=# SELECT * FROM bit_type_t1; + bt_col1 | bt_col2 | bt_col3 +---------+---------+--------- + 1 | 101 | 00 + 2 | 100 | 101 +(2 rows) + +-- Delete the table. +postgres=# DROP TABLE bit_type_t1; +``` + diff --git a/content/en/docs/Developerguide/boolean-data-types.md b/content/en/docs/Developerguide/boolean-data-types.md new file mode 100644 index 000000000..402a3dfd8 --- /dev/null +++ b/content/en/docs/Developerguide/boolean-data-types.md @@ -0,0 +1,72 @@ +# Boolean Data Types + +**Table 1** Boolean types + + + + + + + + + + + + + + +

| Name | Description | Storage Space | Value |
| :--- | :--- | :--- | :--- |
| BOOLEAN | Boolean type | 1 byte | true<br>false<br>null (unknown) |
+ +Valid literal values for the "true" state include: + +TRUE, 't', 'true', 'y', 'yes', and '1' + +Valid literal values for the "false" state include: + +FALSE, 'f', 'false', 'n', 'no', and '0' + +**TRUE** and **FALSE** are standard expressions, compatible with SQL statements. + +## Example + +Boolean values are displayed using the letters t and f. + +``` +-- Create a table. +postgres=# CREATE TABLE bool_type_t1 +( + BT_COL1 BOOLEAN, + BT_COL2 TEXT +); + +-- Insert data. +postgres=# INSERT INTO bool_type_t1 VALUES (TRUE, 'sic est'); + +postgres=# INSERT INTO bool_type_t1 VALUES (FALSE, 'non est'); + +-- View data. +postgres=# SELECT * FROM bool_type_t1; + bt_col1 | bt_col2 +---------+--------- + t | sic est + f | non est +(2 rows) + +postgres=# SELECT * FROM bool_type_t1 WHERE bt_col1 = 't'; + bt_col1 | bt_col2 +---------+--------- + t | sic est +(1 row) + +-- Delete the table. +postgres=# DROP TABLE bool_type_t1; +``` + diff --git a/content/en/docs/Developerguide/branch-statements.md b/content/en/docs/Developerguide/branch-statements.md new file mode 100644 index 000000000..457b37860 --- /dev/null +++ b/content/en/docs/Developerguide/branch-statements.md @@ -0,0 +1,56 @@ +# Branch Statements + +## Syntax + +[Figure 1](#en-us_topic_0237122235_en-us_topic_0059779327_fe2376535378e44c78c4e70078d0fb779) shows the syntax diagram for a branch statement. + +**Figure 1** case\_when::= +![](figures/case_when.png "case_when") + +[Figure 2](#en-us_topic_0237122235_en-us_topic_0059779327_f0b6779d008024e8fb5c2267d8d3bff14) shows the syntax diagram for **when\_clause**. + +**Figure 2** when\_clause::= +![](figures/when_clause.png "when_clause") + +Parameter description: + +- _case\_expression_: specifies the variable or expression. +- _when\_expression_: specifies the constant or conditional expression. +- _statement_: specifies the statement to be executed. + +## Examples + +``` +CREATE OR REPLACE PROCEDURE proc_case_branch(pi_result in integer, pi_return out integer) +AS + BEGIN + CASE pi_result + WHEN 1 THEN + pi_return := 111; + WHEN 2 THEN + pi_return := 222; + WHEN 3 THEN + pi_return := 333; + WHEN 6 THEN + pi_return := 444; + WHEN 7 THEN + pi_return := 555; + WHEN 8 THEN + pi_return := 666; + WHEN 9 THEN + pi_return := 777; + WHEN 10 THEN + pi_return := 888; + ELSE + pi_return := 999; + END CASE; + raise info 'pi_return : %',pi_return ; +END; +/ + +CALL proc_case_branch(3,0); + +-- Delete the stored procedure. 
+DROP PROCEDURE proc_case_branch; +``` + diff --git a/content/en/docs/Developerguide/cache-io.md b/content/en/docs/Developerguide/cache-io.md new file mode 100644 index 000000000..856121a7b --- /dev/null +++ b/content/en/docs/Developerguide/cache-io.md @@ -0,0 +1,61 @@ +# Cache/IO + +- **[STATIO\_USER\_TABLES](statio_user_tables.md)** + +- **[SUMMARY\_STATIO\_USER\_TABLES](summary_statio_user_tables.md)** + +- **[GLOBAL\_STATIO\_USER\_TABLES](global_statio_user_tables.md)** + +- **[STATIO\_USER\_INDEXES](statio_user_indexes.md)** + +- **[SUMMARY\_STATIO\_USER\_INDEXES](summary_statio_user_indexes.md)** + +- **[GLOBAL\_STATIO\_USER\_INDEXES](global_statio_user_indexes.md)** + +- **[STATIO\_USER\_SEQUENCES](statio_user_sequences.md)** + +- **[SUMMARY\_STATIO\_USER\_SEQUENCES](summary_statio_user_sequences.md)** + +- **[GLOBAL\_STATIO\_USER\_SEQUENCES](global_statio_user_sequences.md)** + +- **[STATIO\_SYS\_TABLES](statio_sys_tables.md)** + +- **[SUMMARY\_STATIO\_SYS\_TABLES](summary_statio_sys_tables.md)** + +- **[GLOBAL\_STATIO\_SYS\_TABLES](global_statio_sys_tables.md)** + +- **[STATIO\_SYS\_INDEXES](statio_sys_indexes.md)** + +- **[SUMMARY\_STATIO\_SYS\_INDEXES](summary_statio_sys_indexes.md)** + +- **[GLOBAL\_STATIO\_SYS\_INDEXES](global_statio_sys_indexes.md)** + +- **[STATIO\_SYS\_SEQUENCES](statio_sys_sequences.md)** + +- **[SUMMARY\_STATIO\_SYS\_SEQUENCES](summary_statio_sys_sequences.md)** + +- **[GLOBAL\_STATIO\_SYS\_SEQUENCES](global_statio_sys_sequences.md)** + +- **[STATIO\_ALL\_TABLES](statio_all_tables.md)** + +- **[SUMMARY\_STATIO\_ALL\_TABLES](summary_statio_all_tables.md)** + +- **[GLOBAL\_STATIO\_ALL\_TABLES](global_statio_all_tables.md)** + +- **[STATIO\_ALL\_INDEXES](statio_all_indexes.md)** + +- **[SUMMARY\_STATIO\_ALL\_INDEXES](summary_statio_all_indexes.md)** + +- **[GLOBAL\_STATIO\_ALL\_INDEXES](global_statio_all_indexes.md)** + +- **[STATIO\_ALL\_SEQUENCES](statio_all_sequences.md)** + +- **[SUMMARY\_STATIO\_ALL\_SEQUENCES](summary_statio_all_sequences.md)** + +- **[GLOBAL\_STATIO\_ALL\_SEQUENCES](global_statio_all_sequences.md)** + +- **[GLOBAL\_STAT\_DB\_CU](global_stat_db_cu.md)** + +- **[GLOBAL\_STAT\_SESSION\_CU](global_stat_session_cu.md)** + + diff --git a/content/en/docs/Developerguide/call-statement.md b/content/en/docs/Developerguide/call-statement.md new file mode 100644 index 000000000..7bbf24903 --- /dev/null +++ b/content/en/docs/Developerguide/call-statement.md @@ -0,0 +1,38 @@ +# Call Statement + +## Syntax + +[Figure 1](#en-us_topic_0237122223_en-us_topic_0059778001_fa4de2ab1dc7e4c04b4997c6238ee1861) shows the syntax diagram for calling a clause. + +**Figure 1** call\_clause::= +![](figures/call_clause.png "call_clause") + +The above syntax diagram is explained as follows: + +- **procedure\_name** specifies the name of a stored procedure. +- **parameter** specifies the parameters for the stored procedure. You can set no parameter or multiple parameters. 
+ +## Example + +``` +-- Create the stored procedure proc_staffs: +postgres=# CREATE OR REPLACE PROCEDURE proc_staffs +( +section NUMBER(6), +salary_sum out NUMBER(8,2), +staffs_count out INTEGER +) +IS +BEGIN +SELECT sum(salary), count(*) INTO salary_sum, staffs_count FROM hr.staffs where section_id = section; +END; +/ + +-- Invoke a stored procedure proc_return: +postgres=# CALL proc_staffs(2,8,6); + +-- Delete a stored procedure: +postgres=# DROP PROCEDURE proc_staffs; + +``` + diff --git a/content/en/docs/Developerguide/call.md b/content/en/docs/Developerguide/call.md new file mode 100644 index 000000000..f8cf03397 --- /dev/null +++ b/content/en/docs/Developerguide/call.md @@ -0,0 +1,79 @@ +# CALL + +## Function + +**CALL** can be used to call defined functions and stored procedures. + +## Precautions + +None + +## Syntax + +``` +CALL [schema.] {func_name| procedure_name} ( param_expr ); +``` + +## Parameter Description + +- **schema** + + Specifies the name of the schema where a function or stored procedure is located. + +- **func\_name** + + Specifies the name of the function or stored procedure to be called. + + Value range: an existing function name + +- **param\_expr** + + Specifies a list of parameters in the function. Use := or =\> to separate a parameter name and its value. This method allows parameters to be placed in any order. If only parameter values are in the list, the value order must be the same as that defined in the function or stored procedure. + + Value range: an existing function parameter name or stored procedure parameter name + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The parameters include input parameters \(whose name and type are separated by IN\) and output parameters \(whose name and type are separated by OUT\). When you run the **CALL** statement to call a function or stored procedure, the parameter list must contain an output parameter for non-overloaded functions. You can set the output parameter to a variable or any constant. For details, see [Examples](#en-us_topic_0237122088_en-us_topic_0059778236_s299dc001fa4b48cd9b56412a73db23c0). For an overloaded package function, the parameter list can have no output parameter, but the function may not be found. If an output parameter is contained, it must be a constant. + + +## Examples + +``` +-- Create a function func_add_sql, calculate the sum of two integers, and return the result. +postgres=# CREATE FUNCTION func_add_sql(num1 integer, num2 integer) RETURN integer +AS +BEGIN +RETURN num1 + num2; +END; +/ + +-- Transfer by parameter value. +postgres=# CALL func_add_sql(1, 3); + +-- Transfer by naming tag method. +postgres=# CALL func_add_sql(num1 => 1,num2 => 3); +postgres=# CALL func_add_sql(num2 := 2, num1 := 3); + +-- Delete the function. +postgres=# DROP FUNCTION func_add_sql; + +-- Create a function with output parameters. +postgres=# CREATE FUNCTION func_increment_sql(num1 IN integer, num2 IN integer, res OUT integer) +RETURN integer +AS +BEGIN +res := num1 + num2; +END; +/ + +-- Transfer a constant as an output parameter. +postgres=# CALL func_increment_sql(1,2,1); + +-- Call the function. +postgres=# call package_func_overload(1, 'test'); +postgres=# call package_func_overload(1, 1); + +-- Delete the function. 
+postgres=# DROP FUNCTION func_increment_sql; +``` + diff --git a/content/en/docs/Developerguide/character-data-types.md b/content/en/docs/Developerguide/character-data-types.md new file mode 100644 index 000000000..5938cccf1 --- /dev/null +++ b/content/en/docs/Developerguide/character-data-types.md @@ -0,0 +1,152 @@ +# Character Data Types + +[Table 1](#en-us_topic_0237121950_en-us_topic_0059777889_en-us_topic_0058966269_table29186418) lists the character data types supported by openGauss. For string operators and related built-in functions, see [Character Processing Functions and Operators](character-processing-functions-and-operators.md). + +**Table 1** Character data types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Name | Description | Storage Space |
| :--- | :--- | :--- |
| CHAR(n)<br>CHARACTER(n)<br>NCHAR(n) | Fixed-length character string, blank padded. n indicates the string length. If it is not specified, the default precision 1 is used. | The maximum size is 10 MB. |
| VARCHAR(n)<br>CHARACTER VARYING(n) | Variable-length string. n indicates the string length. | The maximum size is 10 MB. |
| VARCHAR2(n) | Variable-length string. Alias of the VARCHAR(n) type. n indicates the string length. | The maximum size is 10 MB. |
| NVARCHAR2(n) | Variable-length string. n indicates the string length. | The maximum size is 10 MB. |
| CLOB | A big text object. Alias of the TEXT type. | The maximum size is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. |
| TEXT | Variable-length string. | The maximum size is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. |
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>In addition to the size limitation on each column, the total size of each tuple is 8,203 bytes less than 1 GB, namely, 1,073,733,621 bytes. + +In openGauss, there are two other fixed-length character types, as shown in [Table 2](#en-us_topic_0237121950_en-us_topic_0059777889_tf74658686f5e4d979adf0ac04769ea16). The **name** type exists only for the storage of identifiers in the internal system catalogs and is not intended for use by general users. Its length is currently defined as 64 bytes \(63 usable characters plus terminator\). The type **"char"** only uses one byte of storage. It is internally used in the system catalogs as a simplistic enumeration type. + +**Table 2** Special character types + + + + + + + + + + + + + + + + +

| Name | Description | Storage Space |
| :--- | :--- | :--- |
| name | Internal type for object names | 64 bytes |
| "char" | Single-byte internal type | 1 byte |
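Both internal types can be observed in the system catalogs. The following minimal example uses **pg_class** purely for illustration: the **relname** column is of type **name**, and the **relkind** column is of type **"char"**.

```
-- relname is of type name; relkind is of type "char".
postgres=# SELECT relname, relkind FROM pg_class WHERE relname = 'pg_class';
 relname  | relkind
----------+---------
 pg_class | r
(1 row)
```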
+ +## Example + +``` +-- Create a table. +postgres=# CREATE TABLE char_type_t1 +( + CT_COL1 CHARACTER(4) +); + +-- Insert data. +postgres=# INSERT INTO char_type_t1 VALUES ('ok'); + +-- Query data in the table. +postgres=# SELECT ct_col1, char_length(ct_col1) FROM char_type_t1; + ct_col1 | char_length +---------+------------- + ok | 4 +(1 row) + +-- Delete the table. +postgres=# DROP TABLE char_type_t1; +``` + +``` +-- Create a table. +postgres=# CREATE TABLE char_type_t2 +( + CT_COL1 VARCHAR(5) +); + +-- Insert data. +postgres=# INSERT INTO char_type_t2 VALUES ('ok'); + +postgres=# INSERT INTO char_type_t2 VALUES ('good'); + +-- Specify the type length. An error is reported if an inserted string exceeds this length. +postgres=# INSERT INTO char_type_t2 VALUES ('too long'); +ERROR: value too long for type character varying(4) +CONTEXT: referenced column: ct_col1 + +-- Specify the type length. A string exceeding this length is truncated. +postgres=# INSERT INTO char_type_t2 VALUES ('too long'::varchar(5)); + +-- Query data. +postgres=# SELECT ct_col1, char_length(ct_col1) FROM char_type_t2; + ct_col1 | char_length +---------+------------- + ok | 2 + good | 5 + too l | 5 +(3 rows) + +-- Delete data. +postgres=# DROP TABLE char_type_t2; +``` + diff --git a/content/en/docs/Developerguide/character-processing-functions-and-operators.md b/content/en/docs/Developerguide/character-processing-functions-and-operators.md new file mode 100644 index 000000000..a5e8a08f0 --- /dev/null +++ b/content/en/docs/Developerguide/character-processing-functions-and-operators.md @@ -0,0 +1,1506 @@ +# Character Processing Functions and Operators + +String functions and operators provided by openGauss are for concatenating strings with each other, concatenating strings with non-strings, and matching the patterns of strings. + +- bit\_length\(string\) + + Description: Specifies the number of bits occupied by a string. + + Return type: int + + Example: + + ``` + postgres=# SELECT bit_length('world'); + bit_length + ------------ + 40 + (1 row) + ``` + +- btrim\(string text \[, characters text\]\) + + Description: Removes the longest string consisting only of characters in **characters** \(a space by default\) from the start and end of **string**. + + Return type: text + + Example: + + ``` + postgres=# SELECT btrim('sring' , 'ing'); + btrim + ------- + sr + (1 row) + ``` + +- char\_length\(string\) or character\_length\(string\) + + Description: Number of characters in a string + + Return type: int + + Example: + + ``` + postgres=# SELECT char_length('hello'); + char_length + ------------- + 5 + (1 row) + ``` + +- instr\(text,text,int,int\) + + Description: instr\(string1,string2,int1,int2\) returns the position where _string2_ has been matched for _int2_ times starting from _int1_ in _string1_. _int1_ indicates the starting position for matching, and _int2_ indicates the number of matching times. + + Return type: int + + Example: + + ``` + postgres=# SELECT instr( 'abcdabcdabcd', 'bcd', 2, 2 ); + instr + ------- + 6 + (1 row) + ``` + +- lengthb\(text/bpchar\) + + Description: Obtains the number of bytes of a specified string. + + Return type: int + + Example: + + ``` + postgres=# SELECT lengthb('hello'); + lengthb + --------- + 5 + (1 row) + ``` + +- left\(str text, n int\) + + Description: Returns first **n** characters in the string. When **n** is negative, return all but the last **|n|** characters. 
+ + Return type: text + + Example: + + ``` + postgres=# SELECT left('abcde', 2); + left + ------ + ab + (1 row) + ``` + +- length\(string bytea, encoding name \) + + Description: Number of characters in **string** in the given **encoding**. The **string** must be valid in this encoding. + + Return type: int + + Example: + + ``` + postgres=# SELECT length('jose', 'UTF8'); + length + -------- + 4 + (1 row) + ``` + +- lpad\(string text, length int \[, fill text\]\) + + Description: Fills up the string to length **length** by prepending the characters **fill** \(a space by default\). If the **string** is already longer than **length** then it is truncated \(on the right\). + + Return type: text + + Example: + + ``` + postgres=# SELECT lpad('hi', 5, 'xyza'); + lpad + ------- + xyzhi + (1 row) + ``` + +- octet\_length\(string\) + + Description: Number of bytes in a string + + Return type: int + + Example: + + ``` + postgres=# SELECT octet_length('jose'); + octet_length + -------------- + 4 + (1 row) + ``` + +- overlay\(string placing string FROM int \[for int\]\) + + Description: Replaces substring. **FROM int** indicates the start position of the replacement in the first string. **for int** indicates the number of characters replaced in the first string. + + Return type: text + + Example: + + ``` + postgres=# SELECT overlay('hello' placing 'world' from 2 for 3 ); + overlay + --------- + hworldo + (1 row) + ``` + +- position\(substring in string\) + + Description: Specifies the position of a substring. + + Return type: int + + Example: + + ``` + postgres=# SELECT position('ing' in 'string'); + position + ---------- + 4 + (1 row) + ``` + +- pg\_client\_encoding\(\) + + Description: Current client encoding name + + Return type: name + + Example: + + ``` + postgres=# SELECT pg_client_encoding(); + pg_client_encoding + -------------------- + UTF8 + (1 row) + ``` + +- quote\_ident\(string text\) + + Description: Returns the given string suitably quoted to be used as an identifier in an SQL statement string \(quotation marks are used as required\). Quotes are added only if necessary \(that is, if the string contains non-identifier characters or would be case-folded\). Embedded quotes are properly doubled. + + Return type: text + + Example: + + ``` + postgres=# SELECT quote_ident('hello world'); + quote_ident + -------------- + "hello world" + (1 row) + ``` + +- quote\_literal\(string text\) + + Description: Returns the given string suitably quoted to be used as a string literal in an SQL statement string \(quotation marks are used as required\). + + Return type: text + + Example: + + ``` + postgres=# SELECT quote_literal('hello'); + quote_literal + --------------- + 'hello' + (1 row) + ``` + + If command similar to the following exists, text will be escaped. + + ``` + postgres=# SELECT quote_literal(E'O\'hello'); + quote_literal + --------------- + 'O''hello' + (1 row) + ``` + + If command similar to the following exists, backslash will be properly doubled. + + ``` + postgres=# SELECT quote_literal('O\hello'); + quote_literal + --------------- + E'O\\hello' + (1 row) + ``` + + If the parameter is null, return **NULL**. If the parameter may be null, you are advised to use **quote\_nullable**. + + ``` + postgres=# SELECT quote_literal(NULL); + quote_literal + --------------- + + (1 row) + ``` + +- quote\_literal\(value anyelement\) + + Description: Converts the given value to text and then quotes it as a literal. 
+ + Return type: text + + Example: + + ``` + postgres=# SELECT quote_literal(42.5); + quote_literal + --------------- + '42.5' + (1 row) + ``` + + If command similar to the following exists, the given value will be escaped. + + ``` + postgres=# SELECT quote_literal(E'O\'42.5'); + quote_literal + --------------- + '0''42.5' + (1 row) + ``` + + If command similar to the following exists, backslash will be properly doubled. + + ``` + postgres=# SELECT quote_literal('O\42.5'); + quote_literal + --------------- + E'O\\42.5' + (1 row) + ``` + +- quote\_nullable\(string text\) + + Description: Returns the given string suitably quoted to be used as a string literal in an SQL statement string \(quotation marks are used as required\). + + Return type: text + + Example: + + ``` + postgres=# SELECT quote_nullable('hello'); + quote_nullable + ---------------- + 'hello' + (1 row) + ``` + + If command similar to the following exists, text will be escaped. + + ``` + postgres=# SELECT quote_nullable(E'O\'hello'); + quote_nullable + ---------------- + 'O''hello' + (1 row) + ``` + + If command similar to the following exists, backslash will be properly doubled. + + ``` + postgres=# SELECT quote_nullable('O\hello'); + quote_nullable + ---------------- + E'O\\hello' + (1 row) + ``` + + If the parameter is null, return **NULL**. + + ``` + postgres=# SELECT quote_nullable(NULL); + quote_nullable + ---------------- + NULL + (1 row) + ``` + +- quote\_nullable\(value anyelement\) + + Description: Converts the given value to text and then quotes it as a literal. + + Return type: text + + Example: + + ``` + postgres=# SELECT quote_nullable(42.5); + quote_nullable + ---------------- + '42.5' + (1 row) + ``` + + If command similar to the following exists, the given value will be escaped. + + ``` + postgres=# SELECT quote_nullable(E'O\'42.5'); + quote_nullable + ---------------- + 'O''42.5' + (1 row) + ``` + + If command similar to the following exists, backslash will be properly doubled. + + ``` + postgres=# SELECT quote_nullable('O\42.5'); + quote_nullable + ---------------- + E'O\\42.5' + (1 row) + ``` + + If the parameter is null, return **NULL**. + + ``` + postgres=# SELECT quote_nullable(NULL); + quote_nullable + ---------------- + NULL + (1 row) + ``` + +- substring\(string \[from int\] \[for int\]\) + + Description: Extracts a substring. **from int** indicates the start position of the truncation. **for int** indicates the number of characters truncated. + + Return type: text + + Example: + + ``` + postgres=# SELECT substring('Thomas' from 2 for 3); + substring + ----------- + hom + (1 row) + ``` + +- substring\(string from _pattern_\) + + Description: Extracts substring matching POSIX regular expression. It returns the text that matches the pattern. If no match record is found, a null value is returned. + + Return type: text + + Example: + + ``` + postgres=# SELECT substring('Thomas' from '...$'); + substring + ----------- + mas + (1 row) + postgres=# SELECT substring('foobar' from 'o(.)b'); + result + -------- + o + (1 row) + postgres=# SELECT substring('foobar' from '(o(.)b)'); + result + -------- + oob + (1 row) + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If the POSIX pattern contains any parentheses, the portion of the text that matched the first parenthesized sub-expression \(the one whose left parenthesis comes first\) is returned. You can put parentheses around the whole expression if you want to use parentheses within it without triggering this exception. 
+ +- substring\(string from _pattern_ for _escape_\) + + Description: Extracts substring matching SQL regular expression. The declared schema must match the entire data string; otherwise, the function fails and returns a null value. To indicate the part of the pattern that should be returned on success, the pattern must contain two occurrences of the escape character followed by a double quote \("\). The text matching the portion of the pattern between these markers is returned. + + Return type: text + + Example: + + ``` + postgres=# SELECT substring('Thomas' from '%#"o_a#"_' for '#'); + substring + ----------- + oma + (1 row) + ``` + +- rawcat\(raw,raw\) + + Description: Indicates the string concatenation functions. + + Return type: raw + + Example: + + ``` + postgres=# SELECT rawcat('ab','cd'); + rawcat + -------- + ABCD + (1 row) + ``` + +- regexp\_like\(text,text,text\) + + Description: Indicates the mode matching function of a regular expression. + + Return type: bool + + Example: + + ``` + postgres=# SELECT regexp_like('str','[ac]'); + regexp_like + ------------- + f + (1 row) + ``` + +- regexp\_substr\(text,text\) + + Description: Extracts substrings from a regular expression. Its function is similar to **substr**. When a regular expression contains multiple parallel brackets, it also needs to be processed. + + Return type: text + + Example: + + ``` + postgres=# SELECT regexp_substr('str','[ac]'); + regexp_substr + --------------- + + (1 row) + ``` + +- regexp\_matches\(string text, pattern text \[, flags text\]\) + + Description: Returns all captured substrings resulting from matching a POSIX regular expression against the **string**. If the pattern does not match, the function returns no rows. If the pattern contains no parenthesized sub-expressions, then each row returned is a single-element text array containing the substring matching the whole pattern. If the pattern contains parenthesized sub-expressions, the function returns a text array whose _n_th element is the substring matching the _n_th parenthesized sub-expression of the pattern. + + The optional **flags** argument contains zero or multiple single-letter flags that change function behavior. **i** indicates that the matching is not related to uppercase and lowercase. **g** indicates that each matching substring is replaced, instead of replacing only the first one. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >If the last parameter is provided but the parameter value is an empty string \(''\) and the SQL compatibility mode of the database is set to ORA, the returned result is an empty set. This is because the ORA compatible mode treats the empty string \(''\) as **NULL**. To resolve this problem, you can: + >- Change the database SQL compatibility mode to TD. + >- Do not provide the last parameter or do not set the last parameter to an empty string. + + Return type: setof text\[\] + + Example: + + ``` + postgres=# SELECT regexp_matches('foobarbequebaz', '(bar)(beque)'); + regexp_matches + ---------------- + {bar,beque} + (1 row) + postgres=# SELECT regexp_matches('foobarbequebaz', 'barbeque'); + regexp_matches + ---------------- + {barbeque} + (1 row) + postgres=# SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); + result + -------------- + {bar,beque} + {bazil,barf} + (2 rows) + ``` + +- regexp\_split\_to\_array\(string text, pattern text \[, flags text \]\) + + Description: Splits **string** using a POSIX regular expression as the delimiter. 
The regexp\_split\_to\_array function behaves the same as regexp\_split\_to\_table, except that regexp\_split\_to\_array returns its result as an array of text. + + Return type: text\[\] + + Example: + + ``` + postgres=# SELECT regexp_split_to_array('hello world', E'\\s+'); + regexp_split_to_array + ----------------------- + {hello,world} + (1 row) + ``` + +- regexp\_split\_to\_table\(string text, pattern text \[, flags text\]\) + + Description: Splits **string** using a POSIX regular expression as the delimiter. If there is no match to the pattern, the function returns the string. If there is at least one match, for each match it returns the text from the end of the last match \(or the beginning of the string\) to the beginning of the match. When there are no more matches, it returns the text from the end of the last match to the end of the string. + + The **flags** parameter is a text string containing zero or more single-letter flags that change the function's behavior. **i** indicates that the matching is not related to uppercase and lowercase. **g** indicates that each matching substring is replaced, instead of replacing only the first one. + + Return type: setof text + + Example: + + ``` + postgres=# SELECT regexp_split_to_table('hello world', E'\\s+'); + regexp_split_to_table + ----------------------- + hello + world + (2 rows) + ``` + +- repeat\(string text, number int \) + + Description: Repeats **string** the specified number of times. + + Return type: text + + Example: + + ``` + postgres=# SELECT repeat('Pg', 4); + repeat + ---------- + PgPgPgPg + (1 row) + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The maximum size of memory allocated at a time cannot exceed 1 GB due to the memory allocation mechanism of the database. Therefore, the maximum value of **number** cannot exceed \(1 GB - **x**\)/**lengthb** \(**string**\) - 1. **x** indicates the header length, which is usually greater than 4 bytes. The value varies in different scenarios. + +- replace\(string text, from text, to text\) + + Description: Replaces all occurrences in **string** of substring **from** with substring **to**. + + Return type: text + + Example: + + ``` + postgres=# SELECT replace('abcdefabcdef', 'cd', 'XXX'); + replace + ---------------- + abXXXefabXXXef + (1 row) + ``` + + +- reverse\(str\) + + Description: Returns reversed string. + + Return type: text + + Example: + + ``` + postgres=# SELECT reverse('abcde'); + reverse + --------- + edcba + (1 row) + ``` + +- right\(str text, n int\) + + Description: Returns the last **n** characters in a string. When **n** is negative, return all but first **|n|** characters. + + Return type: text + + Example: + + ``` + postgres=# SELECT right('abcde', 2); + right + ------- + de + (1 row) + + postgres=# SELECT right('abcde', -2); + right + ------- + cde + (1 row) + ``` + +- rpad\(string text, length int \[, fill text\]\) + + Description: Fills up the string to length **length** by prepending the characters **fill** \(a space by default\). If the string is already longer than length then it is truncated. + + Return type: text + + Example: + + ``` + postgres=# SELECT rpad('hi', 5, 'xy'); + rpad + ------- + hixyx + (1 row) + ``` + +- rtrim\(string text \[, characters text\]\) + + Description: Removes the longest string containing only characters from characters \(a space by default\) from the end of string. 
+ + Return type: text + + Example: + + ``` + postgres=# SELECT rtrim('trimxxxx', 'x'); + rtrim + ------- + trim + (1 row) + ``` + +- substrb\(text,int,int\) + + Description: Extracts a substring. The first **int **indicates the start position of the subtraction. The second **int** indicates the number of characters subtracted. + + Return type: text + + Example: + + ``` + postgres=# SELECT substrb('string',2,3); + substrb + --------- + tri + (1 row) + ``` + +- substrb\(text,int\) + + Description: Extracts a substring. **int** indicates the start position of the subtraction. + + Return type: text + + Example: + + ``` + postgres=# SELECT substrb('string',2); + substrb + --------- + tring + (1 row) + ``` + +- substr\(bytea,from,count\) + + Description: Extracts a substring from **bytea**. **from** specifies the position where the extraction starts. **count** specifies the length of the extracted substring. + + Return type: text + + Example: + + ``` + postgres=# SELECT substr('string',2,3); + substr + -------- + tri + (1 row) + ``` + +- string || string + + Description: Concatenates strings. + + Return type: text + + Example: + + ``` + postgres=# SELECT 'MPP'||'DB' AS RESULT; + result + -------- + MPPDB + (1 row) + ``` + +- string || non-string or non-string || string + + Description: Concatenates strings and non-strings. + + Return type: text + + Example: + + ``` + postgres=# SELECT 'Value: '||42 AS RESULT; + result + ----------- + Value: 42 + (1 row) + ``` + +- split\_part\(string text, delimiter text, field int\) + + Description: Splits **string** on **delimiter** and returns the **field**th column \(counting from text of the first appeared delimiter\). + + Return type: text + + Example: + + ``` + postgres=# SELECT split_part('abc~@~def~@~ghi', '~@~', 2); + split_part + ------------ + def + (1 row) + ``` + +- strpos\(string, substring\) + + Description: Specifies the position of a substring. It is the same as **position\(substring in string\)**. However, the parameter sequences of them are reversed. + + Return type: int + + Example: + + ``` + postgres=# SELECT strpos('source', 'rc'); + strpos + -------- + 4 + (1 row) + ``` + +- to\_hex\(number int or bigint\) + + Description: Converts number to a hexadecimal expression. + + Return type: text + + Example: + + ``` + postgres=# SELECT to_hex(2147483647); + to_hex + ---------- + 7fffffff + (1 row) + ``` + +- translate\(string text, from text, to text\) + + Description: Any character in **string** that matches a character in the **from** set is replaced by the corresponding character in the **to** set. If **from** is longer than **to**, extra characters occurred in **from** are removed. + + Return type: text + + Example: + + ``` + postgres=# SELECT translate('12345', '143', 'ax'); + translate + ----------- + a2x5 + (1 row) + ``` + + +- length\(string\) + + Description: Obtains the number of characters in a string. + + Return type: integer + + Example: + + ``` + postgres=# SELECT length('abcd'); + length + -------- + 4 + (1 row) + ``` + +- lengthb\(string\) + + Description: Obtains the number of characters in a string. The value depends on character sets \(GBK and UTF8\). + + Return type: integer + + Example: + + ``` + postgres=# SELECT lengthb('Chinese'); + lengthb + --------- + 7 + (1 row) + ``` + +- substr\(string,from\) + + Description: + + Extracts substrings from a string. + + **from** indicates the start position of the extraction. + + - If **from** starts at 0, the value **1** is used. 
+ - If the value of **from** is positive, all characters from **from** to the end are extracted. + - If the value of **from** is negative, the last n characters in the string are extracted, in which n indicates the absolute value of **from**. + + Return type: varchar + + Example: + + If the value of **from** is positive: + + ``` + postgres=# SELECT substr('ABCDEF',2); + substr + -------- + BCDEF + (1 row) + ``` + + If the value of **from** is negative: + + ``` + postgres=# SELECT substr('ABCDEF',-2); + substr + -------- + EF + (1 row) + ``` + +- substr\(string,from,count\) + + Description: + + Extracts substrings from a string. + + **from** indicates the start position of the extraction. + + **count** indicates the length of the extracted sub-string. + + - If **from** starts at 0, the value **1** is used. + - If the value of **from** is positive, extract **count** characters starting from **from**. + - If the value of **from** is negative, extract the last **n** **count** characters in the string, in which **n** indicates the absolute value of **from**. + - If the value of **count** is smaller than **1**, **null** is returned. + + Return type: varchar + + Example: + + If the value of **from** is positive: + + ``` + postgres=# SELECT substr('ABCDEF',2,2); + substr + -------- + BC + (1 row) + ``` + + If the value of **from** is negative: + + ``` + postgres=# SELECT substr('ABCDEF',-3,2); + substr + -------- + DE + (1 row) + ``` + +- substrb\(string,from\) + + Description: The functionality of this function is the same as that of **SUBSTR\(string,from\)**. However, the calculation unit is byte. + + Return type: bytea + + Example: + + ``` + postgres=# SELECT substrb('ABCDEF',-2); + substrb + --------- + EF + (1 row) + ``` + +- substrb\(string,from,count\) + + Description: The functionality of this function is the same as that of **SUBSTR\(string,from,count\)**. However, the calculation unit is byte. + + Return type: bytea + + Example: + + ``` + postgres=# SELECT substrb('ABCDEF',2,2); + substrb + --------- + BC + (1 row) + ``` + +- trim\(\[leading |trailing |both\] \[characters\] from string\) + + Description: Removes the longest string containing only the characters \(a space by default\) from the start/end/both ends of the string. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT trim(BOTH 'x' FROM 'xTomxx'); + btrim + ------- + Tom + (1 row) + ``` + + ``` + postgres=# SELECT trim(LEADING 'x' FROM 'xTomxx'); + ltrim + ------- + Tomxx + (1 row) + ``` + + ``` + postgres=# SELECT trim(TRAILING 'x' FROM 'xTomxx'); + rtrim + ------- + xTom + (1 row) + ``` + +- rtrim\(string \[, characters\]\) + + Description: Removes the longest string containing only characters from characters \(a space by default\) from the end of string. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT rtrim('TRIMxxxx','x'); + rtrim + ------- + TRIM + (1 row) + ``` + +- ltrim\(string \[, characters\]\) + + Description: Removes the longest string containing only characters from characters \(a space by default\) from the start of string. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT ltrim('xxxxTRIM','x'); + ltrim + ------- + TRIM + (1 row) + ``` + +- upper\(string\) + + Description: Converts the string into the uppercase. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT upper('tom'); + upper + ------- + TOM + (1 row) + ``` + +- lower\(string\) + + Description: Converts the string into the lowercase. 
+ + Return type: varchar + + Example: + + ``` + postgres=# SELECT lower('TOM'); + lower + ------- + tom + (1 row) + ``` + +- rpad\(string varchar, length int \[, fill varchar\]\) + + Description: Fills up the string to length **length** by prepending the characters **fill** \(a space by default\). If the string is already longer than length then it is truncated. + + **length** in openGauss indicates the character length. One Chinese character is counted as one character. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT rpad('hi',5,'xyza'); + rpad + ------- + hixyz + (1 row) + ``` + + ``` + postgres=# SELECT rpad('hi',5,'abcdefg'); + rpad + ------- + hiabc + (1 row) + ``` + +- instr\(string,substring\[,position,occurrence\]\) + + Description: Queries and returns the value of the substring position that occurs the occurrence \(first by default\) times from the position \(1 by default\) in the string. + + - If the value of **position** is **0**, **0** is returned. + - If the value of position is negative, searches backwards from the last _n_th character in the string, in which _n_ indicates the absolute value of position. + + In this function, the calculation unit is character. One Chinese character is one character. + + Return type: integer + + Example: + + ``` + postgres=# SELECT instr('corporate floor','or', 3); + instr + ------- + 5 + (1 row) + ``` + + ``` + postgres=# SELECT instr('corporate floor','or',-3,2); + instr + ------- + 2 + (1 row) + ``` + +- initcap\(string\) + + Description: The first letter of each word in the string is converted into the uppercase and the other letters are converted into the lowercase. + + Return type: text + + Example: + + ``` + postgres=# SELECT initcap('hi THOMAS'); + initcap + ----------- + Hi Thomas + (1 row) + ``` + +- ascii\(string\) + + Description: Indicates the ASCII code of the first character in the string. + + Return type: integer + + Example: + + ``` + postgres=# SELECT ascii('xyz'); + ascii + ------- + 120 + (1 row) + ``` + +- replace\(string varchar, search\_string varchar, replacement\_string varchar\) + + Description: Replaces all **search-string** in the string with **replacement\_string**. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT replace('jack and jue','j','bl'); + replace + ---------------- + black and blue + (1 row) + ``` + +- lpad\(string varchar, length int\[, repeat\_string varchar\]\) + + Description: Adds a series of **repeat\_string** \(a space by default\) on the left of the string to generate a new string with the total length of n. + + If the length of the string is longer than the specified length, the function truncates the string and returns the substrings with the specified length. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT lpad('PAGE 1',15,'*.'); + lpad + ----------------- + *.*.*.*.*PAGE 1 + (1 row) + ``` + + ``` + postgres=# SELECT lpad('hello world',5,'abcd'); + lpad + ------- + hello + (1 row) + ``` + +- concat\(str1,str2\) + + Description: Connects str1 and str2 and returns the string. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT concat('Hello', ' World!'); + concat + -------------- + Hello World! + (1 row) + postgres=# SELECT concat('Hello', NULL); + concat + -------- + Hello + (1 row) + ``` + +- chr\(integer\) + + Description: Specifies the character of the ASCII code. 
+ + Return type: varchar + + Example: + + ``` + postgres=# SELECT chr(65); + chr + ----- + A + (1 row) + ``` + +- regexp\_substr\(source\_char, pattern\) + + Description: Extracts substrings from a regular expression. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT regexp_substr('500 Hello World, Redwood Shores, CA', ',[^,]+,') "REGEXPR_SUBSTR"; + REGEXPR_SUBSTR + ------------------- + , Redwood Shores, + (1 row) + ``` + +- regexp\_replace\(string, pattern, replacement \[,flags \]\) + + Description: Replaces substring matching POSIX regular expression. The source string is returned unchanged if there is no match to the pattern. If there is a match, the source string is returned with the replacement string substituted for the matching substring. + + The replacement string can contain \\n, where n is 1 through 9, to indicate that the source substring matching the _n_th parenthesized sub-expression of the pattern should be inserted, and it can contain \\& to indicate that the substring matching the entire pattern should be inserted. + + The optional **flags** argument contains zero or multiple single-letter flags that change function behavior. **i** indicates that the matching is not related to uppercase and lowercase. **g** indicates that each matching substring is replaced, instead of replacing only the first one. + + Return type: varchar + + Example: + + ``` + postgres=# SELECT regexp_replace('Thomas', '.[mN]a.', 'M'); + regexp_replace + ---------------- + ThM + (1 row) + postgres=# SELECT regexp_replace('foobarbaz','b(..)', E'X\\1Y', 'g') AS RESULT; + result + ------------- + fooXarYXazY + (1 row) + ``` + +- concat\_ws\(sep text, str"any" \[, str"any" \[, ...\] \]\) + + Description: The first parameter is used as the separator, which is associated with all following parameters. + + Return type: text + + Example: + + ``` + postgres=# SELECT concat_ws(',', 'ABCDE', 2, NULL, 22); + concat_ws + ------------ + ABCDE,2,22 + (1 row) + ``` + +- convert\(string bytea, src\_encoding name, dest\_encoding name\) + + Description: Converts the bytea string to **dest\_encoding**. **src\_encoding** specifies the source code encoding. The string must be valid in this encoding. + + Return type: bytea + + Example: + + ``` + postgres=# SELECT convert('text_in_utf8', 'UTF8', 'GBK'); + convert + ---------------------------- + \x746578745f696e5f75746638 + (1 row) + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If the rule for converting between source to target encoding \(for example, GBK and LATIN1\) does not exist, the string is returned without conversion. See the **pg\_conversion** system catalog for details. + >Example: + >``` + >postgres=# show server_encoding; + > server_encoding + >----------------- + > LATIN1 + >(1 row) + >postgres=# SELECT convert_from('some text', 'GBK'); + > convert_from + >-------------- + > some text + >(1 row) + >db_latin1=# SELECT convert_to('some text', 'GBK'); + > convert_to + >---------------------- + > \x736f6d652074657874 + >(1 row) + >db_latin1=# SELECT convert('some text', 'GBK', 'LATIN1'); + > convert + >---------------------- + > \x736f6d652074657874 + >(1 row) + >``` + +- convert\_from\(string bytea, src\_encoding name\) + + Description: Converts the long bytea using the coding mode of the database. + + **src\_encoding** specifies the source code encoding. The string must be valid in this encoding. 
+ + Return type: text + + Example: + + ``` + postgres=# SELECT convert_from('text_in_utf8', 'UTF8'); + convert_from + -------------- + text_in_utf8 + (1 row) + ``` + +- convert\_to\(string text, dest\_encoding name\) + + Description: Converts string to **dest\_encoding**. + + Return type: bytea + + Example: + + ``` + postgres=# SELECT convert_to('some text', 'UTF8'); + convert_to + ---------------------- + \x736f6d652074657874 + (1 row) + ``` + +- string \[NOT\] LIKE pattern \[ESCAPE escape-character\] + + Description: Pattern matching function + + If the pattern does not include a percentage sign \(%\) or an underscore \(\_\), this mode represents itself only. In this case, the behavior of LIKE is the same as the equal operator. The underscore \(\_\) in the pattern matches any single character while one percentage sign \(%\) matches no or multiple characters. + + To match with underscores \(\_\) or percent signs \(%\), corresponding characters in pattern must lead escape characters. The default escape character is a backward slash \(\\\) and can be specified using the **ESCAPE** clause. To match with escape characters, enter two escape characters. + + Return type: Boolean + + Example: + + ``` + postgres=# SELECT 'AA_BBCC' LIKE '%A@_B%' ESCAPE '@' AS RESULT; + result + -------- + t + (1 row) + ``` + + ``` + postgres=# SELECT 'AA_BBCC' LIKE '%A@_B%' AS RESULT; + result + -------- + f + (1 row) + ``` + + ``` + postgres=# SELECT 'AA@_BBCC' LIKE '%A@_B%' AS RESULT; + result + -------- + t + (1 row) + ``` + +- REGEXP\_LIKE\(source\_string, pattern \[, match\_parameter\]\) + + Description: Indicates the mode matching function of a regular expression. + + **source\_string** indicates the source string and **pattern** indicates the matching pattern of the regular expression. **match\_parameter** indicates the matching items and the values are as follows: + + - 'i': case-insensitive + - "c": case-sensitive + - "n": allowing the metacharacter "." in a regular expression to be matched with a linefeed. + - "m": allows **source\_string** to be regarded as multiple rows. + + If **match\_parameter** is ignored, **case-sensitive** is enabled by default, "." is not matched with a linefeed, and **source\_string** is regarded as a single row. + + Return type: Boolean + + Example: + + ``` + postgres=# SELECT regexp_like('ABC', '[A-Z]'); + regexp_like + ------------- + t + (1 row) + ``` + + ``` + postgres=# SELECT regexp_like('ABC', '[D-Z]'); + regexp_like + ------------- + f + (1 row) + ``` + + ``` + postgres=# SELECT regexp_like('ABC', '[A-Z]','i'); + regexp_like + ------------- + t + (1 row) + ``` + + ``` + postgres=# SELECT regexp_like('ABC', '[A-Z]'); + regexp_like + ------------- + t + (1 row) + ``` + +- format\(formatstr text \[, str"any" \[, ...\] \]\) + + Description: Formats a string. + + Return type: text + + Example: + + ``` + postgres=# SELECT format('Hello %s, %1$s', 'World'); + format + -------------------- + Hello World, World + (1 row) + ``` + +- md5\(string\) + + Description: Encrypts a string in MD5 mode and returns a value in hexadecimal form. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >MD5 is insecure and is not recommended. + + Return type: text + + Example: + + ``` + postgres=# SELECT md5('ABC'); + md5 + ---------------------------------- + 902fbdd2b1df0c4f70b4a5d23525e932 + (1 row) + ``` + +- decode\(string text, format text\) + + Description: Decodes binary data from textual representation. 
+ + Return type: bytea + + Example: + + ``` + postgres=# SELECT decode('MTIzAAE=', 'base64'); + decode + -------------- + \x3132330001 + (1 row) + ``` + +- encode\(data bytea, format text\) + + Description: Encodes binary data into a textual representation. + + Return type: text + + Example: + + ``` + postgres=# SELECT encode(E'123\\000\\001', 'base64'); + encode + ---------- + MTIzAAE= + (1 row) + ``` + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- For a string containing newline characters, for example, a string consisting of a newline character and a space, the value of **length** and **lengthb** in openGauss is 2. +>- In openGauss, _n_ in the CHAR\(n\) type indicates the number of characters. Therefore, for multiple-octet coded character sets, the length returned by the LENGTHB function may be longer than n. + diff --git a/content/en/docs/Developerguide/checking-blocked-statements.md b/content/en/docs/Developerguide/checking-blocked-statements.md new file mode 100644 index 000000000..e37c7c47e --- /dev/null +++ b/content/en/docs/Developerguide/checking-blocked-statements.md @@ -0,0 +1,70 @@ +# Checking Blocked Statements + +During database running, query statements are blocked in some service scenarios and run for an excessively long time. In this case, you can forcibly terminate the faulty session. + +## Procedure + +1. Log in as the OS user **omm** to a database node. +2. Run the following command to connect to the database: + + ``` + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database node. + + If information similar to the following is displayed, the connection succeeds: + + ``` + gsql ((openGauss 1.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131 + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + postgres=# + ``` + +3. View blocked query statements and details about the tables and schemas that block the query statements. + + ``` + SELECT w.query as waiting_query, + w.pid as w_pid, + w.usename as w_user, + l.query as locking_query, + l.pid as l_pid, + l.usename as l_user, + t.schemaname || '.' || t.relname as tablename + from pg_stat_activity w join pg_locks l1 on w.pid = l1.pid + and not l1.granted join pg_locks l2 on l1.relation = l2.relation + and l2.granted join pg_stat_activity l on l2.pid = l.pid join pg_stat_user_tables t on l1.relation = t.relid + where w.waiting; + ``` + + The thread ID, user details, query status, as well as details about the tables and schemas that block the query statements are returned. + +4. Run the following command to terminate the required session, where **139834762094352** is the thread ID: + + ``` + SELECT PG_TERMINATE_BACKEND(139834762094352); + ``` + + If information similar to the following is displayed, the session is successfully terminated: + + ``` + PG_TERMINATE_BACKEND + ---------------------- + t + (1 row) + ``` + + If information similar to the following is displayed, a user is attempting to terminate the session, and the session will be reconnected rather than being terminated. + + ``` + FATAL: terminating connection due to administrator command + FATAL: terminating connection due to administrator command + The connection to the server was lost. Attempting reset: Succeeded. 
+ ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >If the **PG\_TERMINATE\_BACKEND** function is used to terminate the background threads of the session, the **gsql** client will be reconnected rather than be logged out. + + diff --git a/content/en/docs/Developerguide/checking-the-number-of-database-connections.md b/content/en/docs/Developerguide/checking-the-number-of-database-connections.md new file mode 100644 index 000000000..803c416ca --- /dev/null +++ b/content/en/docs/Developerguide/checking-the-number-of-database-connections.md @@ -0,0 +1,133 @@ +# Checking the Number of Database Connections + +## Background + +If the number of connections reaches its upper limit, new connections cannot be created. Therefore, if a user fails to connect a database, the administrator must check whether the number of connections has reached the upper limit. The following are details about database connections: + +- The maximum number of global connections is specified by the **max\_connections** parameter. Its default value is **5000**. +- The number of a user's connections is specified by **CONNECTION LIMIT connlimit** in the **CREATE ROLE** statement and can be changed using **CONNECTION LIMIT connlimit** in the **ALTER ROLE** statement. +- The number of a database's connections is specified by the **CONNECTION LIMIT connlimit** parameter in the **CREATE DATABASE** statement. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the following command to connect to the database: + + ``` + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + If information similar to the following is displayed, the connection succeeds: + + ``` + gsql ((openGauss 1.0 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + postgres=# + ``` + +3. View the upper limit of the number of global connections. + + ``` + postgres=# SHOW max_connections; + max_connections + ----------------- + 800 + (1 row) + ``` + + **800** is the maximum number of session connections. + +4. View the number of connections that have been used. + + For details, see [Table 1](#en-us_topic_0237121094_en-us_topic_0059779140_t608a1965463e41f1b6eacd02f97a65ba). + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >Except for database and usernames that are enclosed in double quotation marks \("\) during creation, uppercase letters are not allowed in the database and usernames in the commands in the following table. + + **Table 1** Viewing the number of session connections + + + + + + + + + + + + + + + + + + + + + + +

+    - **View the maximum number of sessions connected to a specific user.**
+
+        Run the following commands to view the upper limit of the number of omm's session connections. -1 indicates that no upper limit is set for the number of omm's session connections.
+
+        ```
+        postgres=# SELECT ROLNAME,ROLCONNLIMIT FROM PG_ROLES WHERE ROLNAME='omm';
+         rolname | rolconnlimit
+        ---------+--------------
+         omm     |           -1
+        (1 row)
+        ```
+
+    - **View the number of session connections that have been used by a user.**
+
+        Run the following commands to view the number of session connections that have been used by omm. 1 indicates the number of session connections that have been used by omm.
+
+        ```
+        postgres=# CREATE OR REPLACE VIEW DV_SESSIONS AS
+            SELECT
+                sa.sessionid AS SID,
+                0::integer AS SERIAL#,
+                sa.usesysid AS USER#,
+                ad.rolname AS USERNAME
+            FROM pg_stat_get_activity(NULL) AS sa
+            LEFT JOIN pg_authid ad ON(sa.usesysid = ad.oid)
+            WHERE sa.application_name <> 'JobScheduler';
+        postgres=# SELECT COUNT(*) FROM DV_SESSIONS WHERE USERNAME='omm';
+         count
+        -------
+             1
+        (1 row)
+        ```
+
+    - **View the maximum number of sessions connected to a specific database.**
+
+        Run the following commands to view the upper limit of the number of postgres's session connections. -1 indicates that no upper limit is set for the number of postgres's session connections.
+
+        ```
+        postgres=# SELECT DATNAME,DATCONNLIMIT FROM PG_DATABASE WHERE DATNAME='postgres';
+         datname  | datconnlimit
+        ----------+--------------
+         postgres |           -1
+        (1 row)
+        ```
+
+    - **View the number of session connections that have been used by a specific database.**
+
+        Run the following commands to view the number of session connections that have been used by postgres. 1 indicates the number of session connections that have been used by postgres.
+
+        ```
+        postgres=# SELECT COUNT(*) FROM PG_STAT_ACTIVITY WHERE DATNAME='postgres';
+         count
+        -------
+             1
+        (1 row)
+        ```
+
+    - **View the number of session connections that have been used by all users.**
+
+        Run the following commands to view the number of session connections that have been used by all users:
+
+        ```
+        postgres=# CREATE OR REPLACE VIEW DV_SESSIONS AS
+            SELECT
+                sa.sessionid AS SID,
+                0::integer AS SERIAL#,
+                sa.usesysid AS USER#,
+                ad.rolname AS USERNAME
+            FROM pg_stat_get_activity(NULL) AS sa
+            LEFT JOIN pg_authid ad ON(sa.usesysid = ad.oid)
+            WHERE sa.application_name <> 'JobScheduler';
+        postgres=# SELECT COUNT(*) FROM DV_SESSIONS;
+         count
+        -------
+            10
+        (1 row)
+        ```
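+
+    If a user or a database is close to its upper limit, the limits described in Background can be adjusted. The statements below are a sketch only: the role name joe and the new limit values are illustrative assumptions, not values required by openGauss.
+
+    ```
+    -- Sketch only: raise the connection limit of an existing role named joe.
+    postgres=# ALTER ROLE joe CONNECTION LIMIT 200;
+    -- Sketch only: allow at most 500 concurrent connections to the postgres database.
+    postgres=# ALTER DATABASE postgres CONNECTION LIMIT 500;
+    ```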
+
+
diff --git a/content/en/docs/Developerguide/checkpoint-22.md b/content/en/docs/Developerguide/checkpoint-22.md
new file mode 100644
index 000000000..95d05edd3
--- /dev/null
+++ b/content/en/docs/Developerguide/checkpoint-22.md
@@ -0,0 +1,30 @@
+# Checkpoint
+
+In openGauss/GaussDB, checkpoints are points in the sequence of transactions at which it is guaranteed that the heap and index data files have been updated with all information written before the checkpoint.
+
+At checkpoint time, all dirty data pages are flushed to disk and a special checkpoint record is written to the log file.
+
+MOT does not store its data the way openGauss/GaussDB does: the data is kept directly in memory, so there is no concept of dirty pages.
+
+For this reason, the CALC algorithm described in the paper Low-Overhead Asynchronous Checkpointing in Main-Memory Database Systems (SIGMOD 2016, Yale University) was researched and implemented for MOT.
+
+Reference:
+
+K. Ren, T. Diamond, D. J. Abadi, and A. Thomson. Low-overhead asynchronous checkpointing in main-memory database systems. In Proceedings of the 2016 ACM SIGMOD International Conference on Management of Data, 2016.
+
+## CALC Checkpoint Algorithm: Low Overhead in Memory and Compute
+
+The CALC checkpoint algorithm provides the following benefits:
+
+- Reduced memory usage: At most two copies of each record are stored at any time. Memory usage is minimized by only storing one physical copy of a record when its live and stable versions are equal or when no checkpoint is actively being recorded.
+- Low overhead: CALC's overhead is smaller than that of other asynchronous checkpointing algorithms.
+- Virtual points of consistency: CALC does not require quiescing the database in order to achieve a physical point of consistency.
+
+## Triggering a Checkpoint Manually
+
+MOT checkpoints are integrated into the envelope's checkpoint mechanism. A checkpoint can be triggered manually by executing the **CHECKPOINT;** command, or automatically according to the envelope's triggering settings \(time/size\).
+
+Checkpoint configuration is done in the **mot.conf** file. For details, see [Default MOT.conf](default-mot-conf.md#EN-US_TOPIC_0257867375).
+
diff --git a/content/en/docs/Developerguide/checkpoint-32.md b/content/en/docs/Developerguide/checkpoint-32.md
new file mode 100644
index 000000000..1322fb1a8
--- /dev/null
+++ b/content/en/docs/Developerguide/checkpoint-32.md
@@ -0,0 +1,30 @@
+# CHECKPOINT
+
+## Function
+
+A checkpoint is a point in the transaction log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to a disk.
+
+**CHECKPOINT** forces a transaction log checkpoint. By default, WALs periodically specify checkpoints in a transaction log. You may use **gs\_guc** to specify run-time parameters **checkpoint\_segments** and **checkpoint\_timeout** to adjust the atomized checkpoint intervals.
+
+## Precautions
+
+- Only a system administrator has the permission to call **CHECKPOINT**.
+- **CHECKPOINT** forces an immediate checkpoint when the related command is issued, without waiting for a regular checkpoint scheduled by the system.
+
+## Syntax
+
+```
+CHECKPOINT;
+```
+
+## Parameter Description
+
+None
+
+## Examples
+
+```
+-- Set a checkpoint.
+postgres=# CHECKPOINT; +``` + diff --git a/content/en/docs/Developerguide/checkpoint.md b/content/en/docs/Developerguide/checkpoint.md new file mode 100644 index 000000000..b67437316 --- /dev/null +++ b/content/en/docs/Developerguide/checkpoint.md @@ -0,0 +1,30 @@ +# CHECKPOINT + +## Function + +A checkpoint is a point in the transaction log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to a disk. + +**CHECKPOINT** forces a transaction log checkpoint. By default, WALs periodically specify checkpoints in a transaction log. You may use **gs\_guc** to specify run-time parameters **checkpoint\_segments** and **checkpoint\_timeout** to adjust the atomized checkpoint intervals. + +## Precautions + +- Only a system administrator has the permission to call **CHECKPOINT**. +- **CHECKPOINT** forces an immediate checkpoint when the related command is issued, without waiting for a regular checkpoint scheduled by the system. + +## Syntax + +``` +CHECKPOINT; +``` + +## Parameter Description + +None + +## Examples + +``` +-- Set a checkpoint. +postgres=# CHECKPOINT; +``` + diff --git a/content/en/docs/Developerguide/checkpoints-41.md b/content/en/docs/Developerguide/checkpoints-41.md new file mode 100644 index 000000000..84520e9e2 --- /dev/null +++ b/content/en/docs/Developerguide/checkpoints-41.md @@ -0,0 +1,111 @@ +# Checkpoints + +## checkpoint\_segments + +**Parameter description**: Specifies the minimum number of WAL segment files in the period specified by **[checkpoint\_timeout](#en-us_topic_0237124708_en-us_topic_0059778936_s880baa9f9b594980afbbe95fb8a77182)**. The size of each log file is 16 MB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer. The minimum value is **1**. + +Increasing the value of this parameter speeds up the export of a large amount of data. Set this parameter based on **[checkpoint\_timeout](#en-us_topic_0237124708_en-us_topic_0059778936_s880baa9f9b594980afbbe95fb8a77182)** and **[shared\_buffers](memory-40.md#en-us_topic_0237124699_en-us_topic_0059777577_s55a43fb6d0464430a59031671b37cd07)**. This parameter affects the number of WAL segment files that can be reused. Generally, the maximum number of reused files in the **pg\_xlog** folder is twice the number of **checkpoint\_segments**. The reused files are not deleted and are renamed to the WAL segment files which will be later used. + +**Default value**: **64** + +## checkpoint\_timeout + +**Parameter description**: Specifies the maximum time between automatic WAL checkpoints. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** an integer ranging from 30 to 3600. The unit is second. + +If the value of **[checkpoint\_segments](#en-us_topic_0237124708_en-us_topic_0059778936_sbadc77895e6643b882a5e7557e405373)** is increased, you need to increase the value of this parameter. The increase of these two parameters further requires the increase of **[shared\_buffers](memory-40.md#en-us_topic_0237124699_en-us_topic_0059777577_s55a43fb6d0464430a59031671b37cd07)**. Consider all these parameters during setting. 
+ +**Default value**: **15min** + +## checkpoint\_completion\_target + +**Parameter description**: Specifies the completion target of each checkpoint, as a fraction of total time between checkpoints. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a floating point number ranging from 0.0 to 1.0 + +**Default value**: **0.5** + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>**0.5** indicates that each checkpoint should be complete within 50% of the interval between checkpoints. + +## checkpoint\_warning + +**Parameter description**: Specifies a time in seconds. If the checkpoint interval is close to this time due to filling of checkpoint segment files, a message is sent to the server log to suggest an increase in the **[checkpoint\_segments](#en-us_topic_0237124708_en-us_topic_0059778936_sbadc77895e6643b882a5e7557e405373)** value. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to _INT\_MAX_. The unit is second. **0** indicates that the warning is disabled. + +**Default value**: **5min** + +**Recommended value**: **5min** + +## checkpoint\_wait\_timeout + +**Parameter description**: Sets the longest time that the checkpoint waits for the checkpointer thread to start. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 2 to 3600. The unit is second. + +**Default value**: **1min** + +## enable\_incremental\_checkpoint + +**Parameter description**: Specifies whether to enable incremental checkpointing. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## enable\_double\_write + +**Parameter description**: Specifies whether to enable the doublewrite buffer. When the incremental checkpointing is enabled, the doublewrite buffer instead of **full\_page\_writes** is used to prevent partial page writes. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## incremental\_checkpoint\_timeout + +**Parameter description**: Specifies the maximum interval between automatic WAL checkpoints when the incremental checkpointing is enabled. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** an integer ranging from 1 to 3600. The unit is second. + +**Default value**: **1min** + +## enable\_xlog\_prune + +**Parameter description**: Specifies whether the primary server reclaims logs when the size of an .xlog file exceeds the value of **max\_size\_for\_xlog\_prune** when any standby server is disconnected. + +This parameter is a SIGHUP parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## max\_size\_for\_xlog\_prune + +**Parameter description**: This parameter is valid only when **enable\_xlog\_prune** is enabled. If a standby node is disconnected and the size of an .xlog file is greater than the threshold, the .xlog file is reclaimed. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer from 0 to 576460752303423487. The unit is MB. + +**Default value**: **100000**. Unit: MB + diff --git a/content/en/docs/Developerguide/checkpoints.md b/content/en/docs/Developerguide/checkpoints.md new file mode 100644 index 000000000..e312b1c3d --- /dev/null +++ b/content/en/docs/Developerguide/checkpoints.md @@ -0,0 +1,111 @@ +# Checkpoints + +## checkpoint\_segments + +**Parameter description**: Specifies the minimum number of WAL segment files in the period specified by **[checkpoint\_timeout](#en-us_topic_0237124708_en-us_topic_0059778936_s880baa9f9b594980afbbe95fb8a77182)**. The size of each log file is 16 MB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer. The minimum value is **1**. + +Increasing the value of this parameter speeds up the export of a large amount of data. Set this parameter based on **[checkpoint\_timeout](#en-us_topic_0237124708_en-us_topic_0059778936_s880baa9f9b594980afbbe95fb8a77182)** and **[shared\_buffers](memory-26.md#en-us_topic_0237124699_en-us_topic_0059777577_s55a43fb6d0464430a59031671b37cd07)**. This parameter affects the number of WAL segment files that can be reused. Generally, the maximum number of reused files in the **pg\_xlog** folder is twice the number of **checkpoint\_segments**. The reused files are not deleted and are renamed to the WAL segment files which will be later used. + +**Default value**: **64** + +## checkpoint\_timeout + +**Parameter description**: Specifies the maximum time between automatic WAL checkpoints. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** an integer ranging from 30 to 3600. The unit is second. + +If the value of **[checkpoint\_segments](#en-us_topic_0237124708_en-us_topic_0059778936_sbadc77895e6643b882a5e7557e405373)** is increased, you need to increase the value of this parameter. The increase of these two parameters further requires the increase of **[shared\_buffers](memory-26.md#en-us_topic_0237124699_en-us_topic_0059777577_s55a43fb6d0464430a59031671b37cd07)**. Consider all these parameters during setting. + +**Default value**: **15min** + +## checkpoint\_completion\_target + +**Parameter description**: Specifies the completion target of each checkpoint, as a fraction of total time between checkpoints. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). 
+ +**Value range**: a floating point number ranging from 0.0 to 1.0 + +**Default value**: **0.5** + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>**0.5** indicates that each checkpoint should be complete within 50% of the interval between checkpoints. + +## checkpoint\_warning + +**Parameter description**: Specifies a time in seconds. If the checkpoint interval is close to this time due to filling of checkpoint segment files, a message is sent to the server log to suggest an increase in the **[checkpoint\_segments](#en-us_topic_0237124708_en-us_topic_0059778936_sbadc77895e6643b882a5e7557e405373)** value. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to _INT\_MAX_. The unit is second. **0** indicates that the warning is disabled. + +**Default value**: **5min** + +**Recommended value**: **5min** + +## checkpoint\_wait\_timeout + +**Parameter description**: Sets the longest time that the checkpoint waits for the checkpointer thread to start. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 2 to 3600. The unit is second. + +**Default value**: **1min** + +## enable\_incremental\_checkpoint + +**Parameter description**: Specifies whether to enable incremental checkpointing. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## enable\_double\_write + +**Parameter description**: Specifies whether to enable the doublewrite buffer. When the incremental checkpointing is enabled, the doublewrite buffer instead of **full\_page\_writes** is used to prevent partial page writes. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## incremental\_checkpoint\_timeout + +**Parameter description**: Specifies the maximum interval between automatic WAL checkpoints when the incremental checkpointing is enabled. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** an integer ranging from 1 to 3600. The unit is second. + +**Default value**: **1min** + +## enable\_xlog\_prune + +**Parameter description**: Specifies whether the primary server reclaims logs when the size of an .xlog file exceeds the value of **max\_size\_for\_xlog\_prune** when any standby server is disconnected. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **on** + +## max\_size\_for\_xlog\_prune + +**Parameter description**: This parameter is valid only when **enable\_xlog\_prune** is enabled. 
If a standby node is disconnected and the size of an .xlog file is greater than the threshold, the .xlog file is reclaimed. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer from 0 to 576460752303423487. The unit is MB. + +**Default value**: **100000**. Unit: MB + diff --git a/content/en/docs/Developerguide/class_vital_info.md b/content/en/docs/Developerguide/class_vital_info.md new file mode 100644 index 000000000..a3a254976 --- /dev/null +++ b/content/en/docs/Developerguide/class_vital_info.md @@ -0,0 +1,47 @@ +# CLASS\_VITAL\_INFO + +**CLASS\_VITAL\_INFO** is used to check whether the OIDs of the same table or index are consistent for WDR snapshots. + +**Table 1** CLASS\_VITAL\_INFO columns + + + + + + + + + + + + + + + + + + + + + + + + +

+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| relid | oid | Table OID |
+| schemaname | name | Schema name |
+| relname | name | Table name |
+| relkind | "char" | Object type. Its value can be:<br>- r: ordinary table<br>- t: TOAST table<br>- i: index |
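+
+To see what the view records, it can be queried directly. The query below is a sketch only; it assumes that, like other WDR-related views, CLASS_VITAL_INFO is exposed through the dbe_perf schema in your installation:
+
+```
+-- Sketch only: list a few of the objects tracked for WDR snapshots (assumes the dbe_perf schema).
+postgres=# SELECT relid, schemaname, relname, relkind FROM dbe_perf.class_vital_info LIMIT 3;
+```
+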
+ diff --git a/content/en/docs/Developerguide/client-access-authentication.md b/content/en/docs/Developerguide/client-access-authentication.md new file mode 100644 index 000000000..3a56f3585 --- /dev/null +++ b/content/en/docs/Developerguide/client-access-authentication.md @@ -0,0 +1,15 @@ +# Client Access Authentication + +- **[Configuring Client Access Authentication](configuring-client-access-authentication.md)** + +- **[Configuration File Reference](configuration-file-reference.md)** + +- **[Establishing Secure TCP/IP Connections in SSL Mode](establishing-secure-tcp-ip-connections-in-ssl-mode.md)** + +- **[Establishing Secure TCP/IP Connections in SSH Tunnel Mode](establishing-secure-tcp-ip-connections-in-ssh-tunnel-mode.md)** + +- **[Checking the Number of Database Connections](checking-the-number-of-database-connections.md)** + +- **[Managing SSL Certificates](managing-ssl-certificates.md)** + + diff --git a/content/en/docs/Developerguide/close.md b/content/en/docs/Developerguide/close.md new file mode 100644 index 000000000..39f1ff6fe --- /dev/null +++ b/content/en/docs/Developerguide/close.md @@ -0,0 +1,40 @@ +# CLOSE + +## Function + +**CLOSE** frees the resources associated with an open cursor. + +## Precautions + +- After a cursor is closed, no subsequent operations are allowed on it. +- A cursor should be closed when it is no longer needed. +- Every non-holdable open cursor is implicitly closed when a transaction is terminated by **COMMIT** or **ROLLBACK**. +- A holdable cursor is implicitly closed if the transaction that created it aborts by **ROLLBACK**. +- If the cursor creation transaction is successfully committed, the holdable cursor remains open until an explicit **CLOSE** operation is executed, or the client disconnects. +- openGauss does not have an explicit **OPEN** cursor statement. A cursor is considered open when it is declared. You can view all available cursors by querying the **pg\_cursors** system view. + +## Syntax + +``` +CLOSE { cursor_name | ALL } ; +``` + +## Parameter Description + +- **cursor\_name** + + Specifies the name of a cursor to be closed. + +- **ALL** + + Closes all open cursors. + + +## Examples + +See [Examples](fetch.md#en-us_topic_0237122165_en-us_topic_0059778422_s1ee72832a27547e4949061a010e24578) in **FETCH**. + +## Helpful Links + +[FETCH](fetch.md) and [MOVE](move.md) + diff --git a/content/en/docs/Developerguide/closing-a-connection.md b/content/en/docs/Developerguide/closing-a-connection.md new file mode 100644 index 000000000..5b4ec92c5 --- /dev/null +++ b/content/en/docs/Developerguide/closing-a-connection.md @@ -0,0 +1,6 @@ +# Closing a Connection + +After you complete required data operations in the database, close the database connection. + +Call the close method to close the connection, for example, **Connection conn = null; conn.close\(\)**. + diff --git a/content/en/docs/Developerguide/cluster.md b/content/en/docs/Developerguide/cluster.md new file mode 100644 index 000000000..ec33da096 --- /dev/null +++ b/content/en/docs/Developerguide/cluster.md @@ -0,0 +1,124 @@ +# CLUSTER + +## Function + +**CLUSTER** is used to cluster a table based on an index. + +**CLUSTER** instructs openGauss to cluster the table specified by **table\_name** based on the index specified by **index\_name**. The index must have been defined by **table\_name**. + +When a table is clustered, it is physically reordered based on the index information. Clustering is a one-time operation. When the table is subsequently updated, the changes are not clustered. 
That is, no attempt is made to store new or updated rows according to their index order. + +When a table is clustered, openGauss records which index the table was clustered by. The form **CLUSTER table\_name** reclusters the table using the same index as before. You can also use the **CLUSTER** or **SET WITHOUT CLUSTER** form of **ALTER TABLE** to set the index to be used for future cluster operations, or to clear any previous settings. + +**CLUSTER** without any parameter reclusters all the previously-clustered tables in the current database that the calling user owns, or all such tables if called by an administrator. + +When a table is being clustered, an **ACCESS EXCLUSIVE** lock is acquired on it. This prevents any other database operations \(both read and write\) from being performed on the table until the **CLUSTER** is finished. + +## Precautions + +Only row-store B-tree indexes support **CLUSTER**. + +In the case where you are accessing single rows randomly within a table, the actual order of the data in the table is unimportant. However, if you tend to access some data more than others, and there is an index that groups them together, it is helpful by using **CLUSTER**. If you are requesting a range of indexed values from a table, or a single indexed value that has multiple rows that match, **CLUSTER** will help because once the index identifies the table page for the first row that matches, all other rows that match are probably already on the same table page, and so you save disk accesses and speed up the query. + +When an index scan is used, a temporary copy of the table is created that contains the table data in the index order. Temporary copies of each index on the table are created as well. Therefore, you need free space on disk at least equal to the sum of the table size and the total index size. + +Because **CLUSTER** remembers which indexes are clustered, one can cluster the tables manually the first time, then set up a time like **VACUUM** without any parameters, so that the desired tables are periodically reclustered. + +Because the optimizer records statistics about the ordering of tables, it is advisable to run **ANALYZE** on the newly clustered table. Otherwise, the optimizer might make poor choices of query plans. + +**CLUSTER** cannot be executed in transactions. + +## Syntax + +- Cluster a table. + + ``` + CLUSTER [ VERBOSE ] table_name [ USING index_name ]; + ``` + +- Cluster a partition. + + ``` + CLUSTER [ VERBOSE ] table_name PARTITION ( partition_name ) [ USING index_name ]; + ``` + +- Recluster a table. + + ``` + CLUSTER [ VERBOSE ]; + ``` + + +## Parameter Description + +- **VERBOSE** + + Enables the display of progress messages. + +- **table\_name** + + Specifies the table name. + + Value range: an existing table name + +- **index\_name** + + Specifies the index name. + + Value range: an existing index name + +- **partition\_name** + + Specifies the partition name. + + Value range: an existing partition name + + +## Examples + +``` +-- Create a partitioned table. 
+postgres=# CREATE TABLE tpcds.inventory_p1 +( + INV_DATE_SK INTEGER NOT NULL, + INV_ITEM_SK INTEGER NOT NULL, + INV_WAREHOUSE_SK INTEGER NOT NULL, + INV_QUANTITY_ON_HAND INTEGER +) +PARTITION BY RANGE(INV_DATE_SK) +( + PARTITION P1 VALUES LESS THAN(2451179), + PARTITION P2 VALUES LESS THAN(2451544), + PARTITION P3 VALUES LESS THAN(2451910), + PARTITION P4 VALUES LESS THAN(2452275), + PARTITION P5 VALUES LESS THAN(2452640), + PARTITION P6 VALUES LESS THAN(2453005), + PARTITION P7 VALUES LESS THAN(MAXVALUE) +); + +-- Create an index named ds_inventory_p1_index1. +postgres=# CREATE INDEX ds_inventory_p1_index1 ON tpcds.inventory_p1 (INV_ITEM_SK) LOCAL; + +-- Cluster the tpcds.inventory_p1 table. +postgres=# CLUSTER tpcds.inventory_p1 USING ds_inventory_p1_index1; + +-- Cluster the p3 partition. +postgres=# CLUSTER tpcds.inventory_p1 PARTITION (p3) USING ds_inventory_p1_index1; + +-- Cluster the tables that can be clustered in the database. +postgres=# CLUSTER; + +-- Delete the index. +postgres=# DROP INDEX tpcds.ds_inventory_p1_index1; + +-- Drop the partitioned table. +postgres=# DROP TABLE tpcds.inventory_p1; +``` + +## Suggestions + +- cluster + - It is recommended that you run **ANALYZE** on a newly clustered table. Otherwise, the optimizer might make poor choices of query plans. + - **CLUSTER** cannot be executed in transactions. + + diff --git a/content/en/docs/Developerguide/command-reference-13.md b/content/en/docs/Developerguide/command-reference-13.md new file mode 100644 index 000000000..d2f1f737c --- /dev/null +++ b/content/en/docs/Developerguide/command-reference-13.md @@ -0,0 +1,44 @@ +# Command Reference + +**Table 1** Common parameters + + + + + + + + + + + + + + + + + + + + + + + + +

+| Parameter | Description | Value Range |
+| --------- | ----------- | ----------- |
+| --train | Specifies the path of historical logs used for training. | - |
+| --model | Indicates the path for storing the model and intermediate file generated during training and the model path specified during prediction. | - |
+| --predict | Specifies the path of the load file to be predicted. | - |
+| --ratio | Specifies the recommended retraining threshold ratio. This parameter is optional. | - |
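+
+The options are typically combined as shown below. This is a sketch only: the launcher placeholder and the paths are illustrative assumptions rather than names defined by this reference; substitute the actual tool entry point shipped with your release.
+
+```
+# Sketch only: train a model from historical logs, then predict a new load file.
+<launcher> --train ./history_logs --model ./model_dir
+<launcher> --predict ./current_load --model ./model_dir --ratio 0.5
+```
+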
+ diff --git a/content/en/docs/Developerguide/command-reference.md b/content/en/docs/Developerguide/command-reference.md new file mode 100644 index 000000000..2db25e908 --- /dev/null +++ b/content/en/docs/Developerguide/command-reference.md @@ -0,0 +1,100 @@ +# Command Reference + +**Table 1** Common parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Parameter | Description | Value Range |
+| --------- | ----------- | ----------- |
+| --mode, -m | Specifies the running mode of the tuning program. | train, tune |
+| --config-file, -f | Specifies the configuration file of the tuning program. This parameter is optional. | - |
+| --db-name | Specifies the database to be tuned. | - |
+| --db-user | Specifies the username of the database to be tuned. | - |
+| --port | Specifies the database listening port. | - |
+| --host | Specifies host IP address of the database instance. | - |
+| --host-user | Specifies the username of the database administrator during database installation. | - |
+| --host-ssh-port | Specifies the SSH port number of the host where the database instance is located. This parameter is optional. | - |
+| --scenario | Specifies the tuning mode, which corresponds to three tuning lists. You can modify the lists. | ap, htap, tp |
+| --benchmark | Specifies the name of the benchmark script file. | - |
+| --model-path | Specifies the path for storing or loading tuning (enhanced learning) models. | - |
+| --version, -v | Returns the current tool version. | - |
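+
+A typical run combines these options in one command line. The invocation below is a sketch only: the launcher placeholder, the connection values, and the paths are illustrative assumptions, not names defined by this reference.
+
+```
+# Sketch only: tune a database named tpccdb for a TP workload, reusing a trained model.
+<tuning-program> --mode tune --db-name tpccdb --db-user dbuser --port 5432 --host 192.168.0.1 --host-user omm --scenario tp --benchmark tpcc --model-path ./rl_model
+```
+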
+ diff --git a/content/en/docs/Developerguide/comment.md b/content/en/docs/Developerguide/comment.md new file mode 100644 index 000000000..e2588aa92 --- /dev/null +++ b/content/en/docs/Developerguide/comment.md @@ -0,0 +1,148 @@ +# COMMENT + +## Function + +**COMMENT** defines or changes the comment of an object. + +## Precautions + +- Each object stores only one comment. Therefore, you need to modify a comment and issue a new **COMMENT** command to the same object. To delete the comment, write **NULL** at the position of the text string. When an object is deleted, the comment is automatically deleted. +- Currently, there is no security protection for viewing comments. Any user connected to a database can view all the comments for objects in the database. For shared objects such as databases, roles, and tablespaces, comments are stored globally so any user connected to any database in the cluster can see all the comments for shared objects. Therefore, do not put security-critical information in comments. +- For most objects, only the owner of the object can set comments. Roles do not have owners, so the rule for **COMMENT ON ROLE** is that you must be an administrator to comment on an administrator role, or have the **CREATEROLE** permission to comment on non-administrator roles. A system administrator can comment on all objects. + +## Syntax + +``` +COMMENT ON +{ + AGGREGATE agg_name (agg_type [, ...] ) | + CAST (source_type AS target_type) | + COLLATION object_name | + COLUMN { table_name.column_name | view_name.column_name } | + CONSTRAINT constraint_name ON table_name | + CONVERSION object_name | + DATABASE object_name | + DOMAIN object_name | + EXTENSION object_name | + FOREIGN DATA WRAPPER object_name | + FOREIGN TABLE object_name | + FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) | + INDEX object_name | + LARGE OBJECT large_object_oid | + OPERATOR operator_name (left_type, right_type) | + OPERATOR CLASS object_name USING index_method | + OPERATOR FAMILY object_name USING index_method | + [ PROCEDURAL ] LANGUAGE object_name | + ROLE object_name | + SCHEMA object_name | + SERVER object_name | + TABLE object_name | + TABLESPACE object_name | + TEXT SEARCH CONFIGURATION object_name | + TEXT SEARCH DICTIONARY object_name | + TEXT SEARCH PARSER object_name | + TEXT SEARCH TEMPLATE object_name | + TYPE object_name | + VIEW object_name +} + IS 'text'; +``` + +## Parameter Description + +- **agg\_name** + + Specifies the new name of an aggregation function. + +- **agg\_type** + + Specifies the data type of the aggregation function parameters. + +- **source\_type** + + Specifies the source data type of the cast. + +- **target\_type** + + Specifies the target data type of the cast. + +- **object\_name** + + Specifies the name of an object. + +- **table\_name.column\_name** + + **view\_name.column\_name** + + Specifies the column whose comment is defined or modified. You can add the table name or view name as the prefix. + +- **constraint\_name** + + Specifies the table constraint whose comment is defined or modified. + +- **table\_name** + + Specifies the name of a table. + +- **function\_name** + + Specifies the function whose comment is defined or modified. + +- **argmode,argname,argtype** + + Specifies the schema, name, and type of the function parameters. + +- **large\_object\_oid** + + Specifies the OID of the large object whose comment is defined or modified. + +- **operator\_name** + + Specifies the name of the operator. 
+ +- **left\_type,right\_type** + + Specifies the data type of the operator parameters \(optionally schema-qualified\). If the prefix or suffix operator does not exist, the **NONE** option can be added. + +- **text** + + Specifies the comment content. + + +## Examples + +``` +postgres=# CREATE TABLE tpcds.customer_demographics_t2 +( + CD_DEMO_SK INTEGER NOT NULL, + CD_GENDER CHAR(1) , + CD_MARITAL_STATUS CHAR(1) , + CD_EDUCATION_STATUS CHAR(20) , + CD_PURCHASE_ESTIMATE INTEGER , + CD_CREDIT_RATING CHAR(10) , + CD_DEP_COUNT INTEGER , + CD_DEP_EMPLOYED_COUNT INTEGER , + CD_DEP_COLLEGE_COUNT INTEGER +) +WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE) +; + +-- Comment out the tpcds.customer_demographics_t2.cd_demo_sk column. +postgres=# COMMENT ON COLUMN tpcds.customer_demographics_t2.cd_demo_sk IS 'Primary key of customer demographics table.'; + +-- Create a view consisting of rows with c_customer_sk less than 150. +postgres=# CREATE VIEW tpcds.customer_details_view_v2 AS + SELECT * + FROM tpcds.customer + WHERE c_customer_sk < 150; + +-- Comment out the tpcds.customer_details_view_v2 view. +postgres=# COMMENT ON VIEW tpcds.customer_details_view_v2 IS 'View of customer detail'; + +-- Delete the view. +postgres=# DROP VIEW tpcds.customer_details_view_v2; + +-- Delete the tpcds.customer_demographics_t2 table. +postgres=# DROP TABLE tpcds.customer_demographics_t2; +``` + diff --git a/content/en/docs/Developerguide/commissioning.md b/content/en/docs/Developerguide/commissioning.md new file mode 100644 index 000000000..396ea15fd --- /dev/null +++ b/content/en/docs/Developerguide/commissioning.md @@ -0,0 +1,160 @@ +# Commissioning + +To control the output of log files and better understand the operating status of the database, modify specific configuration parameters in the **postgresql.conf** file in the instance data directory. + +[Table 1](#en-us_topic_0237120444_en-us_topic_0059779333_tec23904511dd4695b8b01f8c7c04563a) describes the adjustable configuration parameters. + +**Table 1** Configuration parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Parameter | Description | Value Range | Remarks |
+| --------- | ----------- | ----------- | ------- |
+| client_min_messages | Level of messages to be sent to clients. | DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, LOG, NOTICE, WARNING, ERROR, FATAL, PANIC<br>Default value: NOTICE | Messages of the set level or lower will be sent to clients. The lower the level is, the fewer the messages will be sent. |
+| log_min_messages | Level of messages to be recorded in server logs. | DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, INFO, NOTICE, WARNING, ERROR, LOG, FATAL, PANIC<br>Default value: WARNING | Messages higher than the set level will be recorded in logs. The higher the level is, the fewer the server logs will be recorded. |
+| log_min_error_statement | Level of SQL error statements to be recorded in server logs. | DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, INFO, NOTICE, WARNING, ERROR, FATAL, PANIC<br>Default value: ERROR | SQL error statements of the set level or higher will be recorded in server logs.<br>Only a system administrator is allowed to modify this parameter. |
+| log_min_duration_statement | Minimum execution duration of a statement. If the execution duration of a statement is equal to or longer than the set milliseconds, the statement and its duration will be recorded in logs. Enabling this function can help you track the query attempts to be optimized. | INT type<br>Default value: -1<br>Unit: millisecond | The default value (-1) indicates that the function is disabled.<br>Only a system administrator is allowed to modify this parameter. |
+| log_connections/log_disconnections | Whether to record a server log message when each session is connected or disconnected. | on: A server log message is recorded when each session is connected or disconnected.<br>off: No server log message is recorded when each session is connected or disconnected.<br>Default value: off | - |
+| log_duration | Whether to record the duration of each executed statement. | on: The system records the duration of each executed statement.<br>off: The system does not record the duration of each executed statement.<br>Default value: off | Only a system administrator is allowed to modify this parameter. |
+| log_statement | SQL statements to be recorded in logs. | none: The system does not record any SQL statements.<br>ddl: The system records data definition statements.<br>mod: The system records data definition statements and data operation statements.<br>all: The system records all statements.<br>Default value: none | Only a system administrator is allowed to modify this parameter. |
+| log_hostname | Whether to record host names. | on: The system records host names.<br>off: The system does not record host names.<br>Default value: off | By default, connection logs only record the IP addresses of connected hosts. With this function, the host names will also be recorded.<br>This parameter affects parameters in Querying Audit Results, GS_WLM_SESSION_HISTORY, PG_STAT_ACTIVITY, and log_line_prefix. |
+ +[Table 2](#en-us_topic_0237120444_en-us_topic_0059779333_t3c729a4a94d145c7bdc4ca788236d8a7) describes the preceding parameter levels. + +**Table 2** Description of log level parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Level | Description |
+| ----- | ----------- |
+| DEBUG[1-5] | Provides information that can be used by developers. Level 1 is the lowest level whereas level 5 is the highest level. |
+| INFO | Provides information about users' hidden requests, for example, information about the VACUUM VERBOSE process. |
+| NOTICE | Provides information that may be important to users, for example, truncations of long identifiers or indexes created as a part of a primary key. |
+| WARNING | Provides warning information for users, for example, COMMIT out of transaction blocks. |
+| ERROR | Reports an error that causes a command to terminate. |
+| LOG | Reports information that administrators may be interested in, for example, the activity levels of check points. |
+| FATAL | Reports the reason that causes a session to terminate. |
+| PANIC | Reports the reason that causes all sessions to terminate. |
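+
+As a quick illustration of the parameters above, session-level settings such as client_min_messages can be changed with SET, while values in postgresql.conf are usually adjusted with gs_guc. This is a sketch only; the data directory below is a placeholder for the actual instance data directory.
+
+```
+postgres=# SET client_min_messages = DEBUG1;
+postgres=# SHOW client_min_messages;
+```
+
+```
+# Sketch only: log every statement that runs for 5 seconds or longer (placeholder data directory).
+gs_guc reload -D /gaussdb/data/dbnode -c "log_min_duration_statement = 5000"
+```
+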
+ diff --git a/content/en/docs/Developerguide/commit-end.md b/content/en/docs/Developerguide/commit-end.md new file mode 100644 index 000000000..37c8f5810 --- /dev/null +++ b/content/en/docs/Developerguide/commit-end.md @@ -0,0 +1,67 @@ +# COMMIT | END + +## Function + +**COMMIT** or **END** commits all operations of a transaction. + +## Precautions + +Only the creator of a transaction or a system administrator can run the **COMMIT** command. The creation and commit operations must be in different sessions. + +## Syntax + +``` +{ COMMIT | END } [ WORK | TRANSACTION ] ; +``` + +## Parameter Description + +- **COMMIT | END** + + Commits the current transaction and makes all changes made by the transaction become visible to others. + +- **WORK | TRANSACTION** + + Specifies an optional keyword, which has no effect except increasing readability. + + +## Examples + +``` +-- Create a table. +postgres=# CREATE TABLE tpcds.customer_demographics_t2 +( + CD_DEMO_SK INTEGER NOT NULL, + CD_GENDER CHAR(1) , + CD_MARITAL_STATUS CHAR(1) , + CD_EDUCATION_STATUS CHAR(20) , + CD_PURCHASE_ESTIMATE INTEGER , + CD_CREDIT_RATING CHAR(10) , + CD_DEP_COUNT INTEGER , + CD_DEP_EMPLOYED_COUNT INTEGER , + CD_DEP_COLLEGE_COUNT INTEGER +) +WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE) +; + +-- Start a transaction. +postgres=# START TRANSACTION; + +-- Insert data. +postgres=# INSERT INTO tpcds.customer_demographics_t2 VALUES(1,'M', 'U', 'DOCTOR DEGREE', 1200, 'GOOD', 1, 0, 0); +postgres=# INSERT INTO tpcds.customer_demographics_t2 VALUES(2,'F', 'U', 'MASTER DEGREE', 300, 'BAD', 1, 0, 0); + +-- Commit the transaction to make all changes permanent. +postgres=# COMMIT; + +-- Query data. +postgres=# SELECT * FROM tpcds.customer_demographics_t2; + +-- Delete the tpcds.customer_demographics_t2 table. +postgres=# DROP TABLE tpcds.customer_demographics_t2; +``` + +## Helpful Links + +[ROLLBACK](rollback.md) + diff --git a/content/en/docs/Developerguide/commit-prepared.md b/content/en/docs/Developerguide/commit-prepared.md new file mode 100644 index 000000000..decebe63d --- /dev/null +++ b/content/en/docs/Developerguide/commit-prepared.md @@ -0,0 +1,42 @@ +# COMMIT PREPARED + +## Function + +**COMMIT PREPARED** commits a prepared two-phase transaction. + +## Precautions + +- The function is only available in maintenance mode \(when GUC parameter **xc\_maintenance\_mode** is **on**\). Exercise caution when enabling the mode. It is used by maintenance engineers for troubleshooting. Common users should not use the mode. +- Only the transaction creators or system administrators can run the **COMMIT PREPARED** command. The creation and commit operations must be in different sessions. +- The transaction function is maintained automatically by the database, and should be not visible to users. + +## Syntax + +``` +COMMIT PREPARED transaction_id ; +COMMIT PREPARED transaction_id WITH CSN; +``` + +## Parameter Description + +- **transaction\_id** + + Specifies the identifier of the transaction to be committed. The identifier must be different from those for current prepared transactions. + + +- **CSN\(commit sequence number\)** + + Specifies the sequence number of the transaction to be committed. It is a 64-bit, incremental, unsigned number. + + +## Example + +``` +COMMIT PREPARED commits a transaction whose identifier is trans_test. 
+postgres=# COMMIT PREPARED 'trans_test'; +``` + +## Helpful Links + +[PREPARE TRANSACTION](prepare-transaction.md) and [ROLLBACK PREPARED](rollback-prepared.md) + diff --git a/content/en/docs/Developerguide/common-faults-and-identification.md b/content/en/docs/Developerguide/common-faults-and-identification.md new file mode 100644 index 000000000..5daadb10d --- /dev/null +++ b/content/en/docs/Developerguide/common-faults-and-identification.md @@ -0,0 +1,9 @@ +# Common Faults and Identification + +- **[Core Fault Locating](core-fault-locating.md)** + +- **[When the TPC-C is running and a disk to be injected is full, the TPC-C stops responding.](when-the-tpc-c-is-running-and-a-disk-to-be-injected-is-full-the-tpc-c-stops-responding.md)** + +- **[Standby Node in the Need Repair \(WAL\) State](standby-node-in-the-need-repair-(wal)-state.md)** + + diff --git a/content/en/docs/Developerguide/communication-library-parameters.md b/content/en/docs/Developerguide/communication-library-parameters.md new file mode 100644 index 000000000..c9614df6b --- /dev/null +++ b/content/en/docs/Developerguide/communication-library-parameters.md @@ -0,0 +1,215 @@ +# Communication Library Parameters + +This section describes parameter settings and value ranges for communication libraries. + +## tcp\_keepalives\_idle + +**Parameter description**: Specifies the interval for transmitting keepalive signals on an OS that supports the **TCP\_KEEPIDLE** socket option. If no keepalive signal is transmitted, the connection is in idle mode. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- If the OS does not support **TCP\_KEEPIDLE**, set this parameter to **0**. +>- The parameter is ignored on an OS where connections are established using the Unix domain socket. + +**Value range:** 0 to 3600. The unit is s. + +**Default value**: **60** + +## tcp\_keepalives\_interval + +**Parameter description**: Specifies the response time before retransmission on an OS that supports the **TCP\_KEEPINTVL** socket option. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: 0 to 180. The unit is s. + +**Default value**: **30** + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- If the OS does not support **TCP\_KEEPINTVL**, set this parameter to **0**. +>- The parameter is ignored on an OS where connections are established using the Unix domain socket. + +## tcp\_keepalives\_count + +**Parameter description**: Specifies the number of keepalive signals that can be waited before the openGauss server is disconnected from the client on an OS that supports the **TCP\_KEEPCNT** socket option. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>- If the OS does not support **TCP\_KEEPCNT**, set this parameter to **0**. +>- The parameter is ignored on an OS where connections are established using the Unix domain socket. + +**Value range**: 0 to 100. 
**0** indicates that the connection is immediately broken if openGauss does not receive a keepalived signal from the client. + +**Default value:** **20** + +## comm\_tcp\_mode + +**Parameter description**: Specifies whether the communication library uses the TCP or SCTP protocol to set up a data channel. The parameter setting takes effect after you restart openGauss. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>SCTP is no longer supported. This parameter is provided for compatibility, but its value is fixed at **on**. + +**Value range**: Boolean. If this parameter is set to **on**, TCP is used to connect to database nodes. + +**Default value**: **on** + +## comm\_sctp\_port + +**Parameter description**: Specifies the TCP or SCTP port used to listen for data packet channels by the TCP proxy communication library or SCTP communication library. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>This port number is automatically allocated during openGauss deployment. Do not change the parameter value. If the port number is incorrectly configured, the database communication fails. + +**Value range**: an integer ranging from 0 to 65535 + +**Default value**: **7000** + +## comm\_control\_port + +**Parameter description**: Specifies the TCP listening port used by the TCP proxy communication library or SCTP communication library. + +This parameter is a fixed INTERNAL parameter and cannot be modified. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>This port number is automatically allocated during openGauss deployment. Do not change the parameter value. If the port number is incorrectly configured, the database communication fails. + +**Value range**: an integer ranging from 0 to 65535 + +**Default value**: **7001** + +## comm\_max\_receiver + +**Parameter description**: Specifies the maximum number of receiving threads for the TCP proxy communication library or SCTP communication library. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1 to 50 + +**Default value:** **4** + +## comm\_quota\_size + +**Parameter description**: Specifies the maximum size of packets that can be consecutively sent by the TCP proxy communication library or SCTP communication library. When you use a 1GE NIC, a small value ranging from 20 KB to 40 KB is recommended. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 2048000. The default unit is KB. + +**Default value**: **1MB** + +## comm\_usable\_memory + +**Parameter description**: Specifies the maximum memory available for buffering on the TCP proxy communication library or SCTP communication library on a database node. 
+ +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>This parameter must be set based on environment memory and the deployment method. If it is too large, an out-of-memory \(OOM\) exception may occur. If it is too small, the performance of the TCP proxy communication library or SCTP communication library may deteriorate. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 100 x 1024 to _INT\_MAX_/2. The default unit is KB. + +**Default value**: **4000MB** + +## comm\_memory\_pool + +**Parameter description**: Specifies the size of the memory pool resources that can be used by the TCP proxy communication library or SCTP communication library on a database node. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If the memory used by the communication library is small, set this parameter to a small value. Otherwise, set it to a large value. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 100 x 1024 to _INT\_MAX_/2. The default unit is KB. + +**Default value**: **2000MB** + +## comm\_memory\_pool\_percent + +**Parameter description**: Specifies the percentage of the memory pool resources that can be used by the TCP proxy communication library or SCTP communication library on a database node. This parameter is used to adaptively reserve memory used by the communication library. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If the memory used by the communication library is small, set this parameter to a small value. Otherwise, set it to a large value. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 100 + +**Default value**: **0** + +## comm\_no\_delay + +**Parameter description**: Specifies whether to use the **NO\_DELAY** attribute of the communication library connection. Restart openGauss for the setting to take effect. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If packet loss occurs in openGauss because a large number of packets are received per second, set this parameter to **off** so that small packets are combined into large packets for transmission to reduce the total number of packets. + +**Default value**: **off** + +## comm\_debug\_mode + +**Parameter description**: Specifies whether to enable the debug mode of the TCP proxy communication library or SCTP communication library, that is, whether to print logs about the communication layer. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If this parameter is set to **on**, a huge number of logs will be printed, adding extra overhead and reducing database performance. Therefore, set it to **on** only in debugging scenarios. + +This parameter is a USERSET parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the debug logs of the communication library are printed. +- **off** indicates that the debug logs of the communication library are not printed. + +**Default value**: **off** + +## comm\_ackchk\_time + +**Parameter description**: Specifies the duration after which the communication library server automatically triggers ACK when no data packet is received. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 20000. The unit is ms. **0** indicates that automatic ACK triggering is disabled. + +**Default value**: **2000** + +## comm\_timer\_mode + +**Parameter description**: Specifies whether to enable the timer mode of the TCP proxy communication library or SCTP communication library, that is, whether to print timer logs in each phase of the communication layer. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If this parameter is set to **on**, a huge number of logs will be printed, adding extra overhead and reducing database performance. Therefore, set it to **on** only in debugging scenarios. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the timer logs of the communication library are printed. +- **off** indicates that the timer logs of the communication library are not printed. + +**Default value**: **off** + +## comm\_stat\_mode + +**Parameter description**: Specifies whether to enable the statistics mode of the TCP proxy communication library or SCTP communication library, that is, whether to print statistics about the communication layer. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If this parameter is set to **on**, a huge number of logs will be printed, adding extra overhead and reducing database performance. Therefore, set it to **on** only in debugging scenarios. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the statistics logs of the communication library are printed. +- **off** indicates that the statistics logs of the communication library are not printed. + +**Default value**: **off** + diff --git a/content/en/docs/Developerguide/comparison-disk-vs-mot.md b/content/en/docs/Developerguide/comparison-disk-vs-mot.md new file mode 100644 index 000000000..a2013c23d --- /dev/null +++ b/content/en/docs/Developerguide/comparison-disk-vs-mot.md @@ -0,0 +1,115 @@ +# Comparison – Disk vs. MOT + +The following table briefly compares the various features of a openGauss Disk-based storage engine and a MOT storage engine. + +**Table 1** Comparison – Disk vs. MOT + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Feature | GaussDB Disk Store | GaussDB MOT Store |
| --- | --- | --- |
| Intel x86 + Kunpeng ARM | Yes | Yes |
| SQL and Feature-set Coverage | 100% | 98% |
| Scale-out | Yes (GaussDB only) | Yes (GaussDB only) |
| Scale-up (Many-cores, NUMA) | Low efficiency | High Efficiency |
| Throughput | High | Extremely High |
| Latency | Low | Extremely Low |
| Distributed (Cluster Mode) | Yes | Yes |
| Isolation Levels | RC+SI, RR, Serializable | RC, RR, RC+SI (in V2 release) |
| Concurrency Control | Pessimistic | Optimistic |
| Data Capacity (Data + Index) | Unlimited | Limited to DRAM |
| Native Compilation | No | Yes |
| Replication, Recovery | Yes | Yes |
| Replication Options | 2 (sync, async) | 3 (sync, async, group-commit) |
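
To make the comparison concrete, the sketch below shows how each engine is selected at table-creation time. It assumes an installation with the MOT engine enabled and uses the FOREIGN TABLE syntax described in the MOT usage documentation; the table and column names are illustrative only.

```
-- Disk-based storage engine (the default for regular tables).
postgres=# CREATE TABLE orders_disk (order_id INT, amount INT);

-- MOT storage engine: with MOT enabled, a memory-optimized table is created through the FOREIGN TABLE syntax.
postgres=# CREATE FOREIGN TABLE orders_mot (order_id INT, amount INT);
```
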
+ +**Legend** + +- RR = Repeatable Reads +- RC = Read Committed +- SI = Snapshot Isolation + diff --git a/content/en/docs/Developerguide/comparison-operators.md b/content/en/docs/Developerguide/comparison-operators.md new file mode 100644 index 000000000..3984dd234 --- /dev/null +++ b/content/en/docs/Developerguide/comparison-operators.md @@ -0,0 +1,52 @@ +# Comparison Operators + +Comparison operators are available for all data types and return Boolean values. + +All comparison operators are binary operators. Only data types that are the same or can be implicitly converted can be compared using comparison operators. + +[Table 1](#en-us_topic_0237121966_en-us_topic_0059777421_en-us_topic_0058965550_table65067702) describes comparison operators provided by openGauss. + +**Table 1** Comparison operations + + + + + + + + + + + + + + + + + + + + + + + + + +

| Operator | Description |
| --- | --- |
| < | Less than |
| > | Greater than |
| <= | Less than or equal to |
| >= | Greater than or equal to |
| = | Equality |
| <> or != | Inequality |
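
As a quick illustration (the values are chosen arbitrarily), each operator yields a Boolean result that can be selected directly:

```
postgres=# SELECT 2 < 3 AS lt, 2 >= 3 AS ge, 'abc' <> 'abd' AS ne;
 lt | ge | ne
----+----+----
 t  | f  | t
(1 row)
```
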
+ +Comparison operators are available for all relevant data types. All comparison operators are binary operators that returned values of Boolean type. Expressions like **1 < 2 < 3** are invalid. \(Because there is no comparison operator to compare a Boolean value with **3**.\) + diff --git a/content/en/docs/Developerguide/compatibility-with-earlier-versions.md b/content/en/docs/Developerguide/compatibility-with-earlier-versions.md new file mode 100644 index 000000000..092bdaafe --- /dev/null +++ b/content/en/docs/Developerguide/compatibility-with-earlier-versions.md @@ -0,0 +1,127 @@ +# Compatibility with Earlier Versions + +This section describes the parameter control of the downward compatibility and external compatibility features of the openGauss database. A backward compatible database supports applications of earlier versions. This section describes parameters used for controlling backward compatibility of a database. + +## array\_nulls + +**Parameter description**: controls whether the array input parser recognizes unquoted NULL as a null array element. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that null values can be entered in arrays. +- **off** indicates backward compatibility with the old behavior. Arrays containing **NULL** values can still be created when this parameter is set to **off**. + +**Default value**: **on** + +## backslash\_quote + +**Parameter description**: controls whether a single quotation mark can be represented by \\' in a string text. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>When the string text meets the SQL standards, \\ has no other meanings. This parameter only affects the handling of non-standard-conforming string texts, including escape string syntax \(E'...'\). + +**Value range**: enumerated values + +- **on** indicates that the use of \\' is always allowed. +- **off** indicates that the use of \\' is rejected. +- **safe\_encoding** indicates that the use of \\' is allowed only when client encoding does not allow ASCII \\ within a multibyte character. + +**Default value**: **safe\_encoding** + +## escape\_string\_warning + +**Parameter description**: specifies a warning on directly using a backslash \(\\\) as an escape in an ordinary string. + +- Applications that wish to use a backslash \(\\\) as an escape need to be modified to use escape string syntax \(E'...'\). This is because the default behavior of ordinary strings is now to treat the backslash as an ordinary character in each SQL standard. +- This variable can be enabled to help locate codes that need to be changed. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +**Default value**: **on** + +## lo\_compat\_privileges + +**Parameter description**: Specifies whether to enable backward compatibility for the privilege check of large objects. + +This parameter is a SUSET parameter. 
Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +**on** indicates that the privilege check is disabled when users read or modify large objects. This setting is compatible with versions earlier than PostgreSQL 9.0. + +**Default value**: **off** + +## quote\_all\_identifiers + +**Parameter description:** When the database generates SQL, this parameter forcibly quotes all identifiers even if they are not keywords. This will affect the output of EXPLAIN as well as the results of functions, such as pg\_get\_viewdef. For details, see the **--quote-all-identifiers** parameter of **gs\_dump**. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates the forcible quotation function is enabled. +- **off** indicates the forcible quotation function is disabled. + +**Default value**: **off** + +## sql\_inheritance + +**Parameter description**: Specifies whether to inherit semantics. This parameter specifies the access policy of descendant tables. **off** indicates that subtables cannot be accessed by commands. That is, the ONLY keyword is used by default. This setting is compatible with earlier versions. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that the subtable can be accessed. +- **off** indicates that the subtable cannot be accessed. + +**Default value**: **on** + +## standard\_conforming\_strings + +**Parameter description**: Specifies whether ordinary string texts \('...'\) treat backslashes as ordinary texts as specified in the SQL standard. + +- Applications can check this parameter to determine how string texts will be processed. +- It is recommended that characters be escaped by using the escape string syntax \(E'...'\). + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that the function is enabled. +- **off** indicates that the function is disabled. + +**Default value**: **on** + +## synchronize\_seqscans + +**Parameter description**: Specifies sequential scans of tables to synchronize with each other. Concurrent scans read the same data block about at the same time and share the I/O workload. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that a scan may start in the middle of the table and then "wrap around" the end to cover all rows to synchronize with the activity of scans already in progress. This may result in unpredictable changes in the row ordering returned by queries that have no ORDER BY clause. +- **off** indicates that the scan always starts from the table heading. 
+ +**Default value**: **on** + +## enable\_beta\_features + +**Parameter description**: Specifies whether to enable some features that are not officially released and are used only for POC verification. Exercise caution when enabling these extended features because they may cause errors in some scenarios. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that the features are enabled and forward compatible, but may incur errors in certain scenarios. +- **off** indicates that the features are disabled. + +**Default value**: **off** + diff --git a/content/en/docs/Developerguide/competitive-overview.md b/content/en/docs/Developerguide/competitive-overview.md new file mode 100644 index 000000000..6bdb4c7aa --- /dev/null +++ b/content/en/docs/Developerguide/competitive-overview.md @@ -0,0 +1,262 @@ +# Competitive Overview + +MOT’s closest alternatives are Microsoft Hekaton and MemSQL followed by its next closest alternative the Oracle In-Memory option with their columnar engines and lastly the VoltDB database. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  

Oracle

+

In-Memory Option

+

Oracle TimesTen Scale-out

+

MSFT SQL Hekaton

+

In-Memory OLTP

+

VoltDB

+

MemSQL

+

Huawei openGauss/ GaussDB MOT

+

x86 and ARM

+

+

+

+

+

+

+

+

Cloud Offering

+

+

+

+

+

+

+

+

+

+

IM Row-store (OLTP), DBMS

+

+

+

+

+

+

+

+

+

+

+

+

IM Column-store (OLAP/HTAP)

+

+

+

+

+

+

+

– (Disk)

+

+

Separate Engines (Disk+IM)

+

+

+

+

+

+

+

+

+

+

+

Distributed DB (Scale-out)

+

+

+

+

+

+

+

+

+

+

+

Distributed SQL / Trans.

+

– –

+

+ +

+

– –

+

+ +

+

+ +

+

+ *

+

Isolation Level (Highest)

+

Serializable

+

Serializable

+

Serializable

+

Serializable

+

Read-Committed

+

Repeatable-Read

+

* Serializable

+

MVCC

+

+

+

+

+

+

+

+

*

+

Interop (Disk+IM) SQL / Tran.

+

+ –

+

– –

+

– +

+

– –

+

+ ?

+

* *

+

High Coverage – SQL, Functions

+

+

+

+

+

+

+ –

+

+

+

+

+

Compilation – JIT / Stored Pr.

+

– –

+

– –

+

– +

+

– +

+

+ –

+

+ *

+

Anti-Caching, Data Tiering

+

– –

+

– –

+

– –

+

– –

+

– –

+

* *

+

RDMA Fast Networking

+

+

+

+

+

+

+

+

*

+

Replication, HA, RTO ~= 0

+

+ + –

+

+ + +

+

+ + –

+

+ + +

+

+ + –

+

+ + *

+
+ ++ Available **– **Unavailable \* Planned + +- **[Oracle In-Memory Option and Oracle TimesTen](oracle-in-memory-option-and-oracle-timesten.md)** + +- **[Microsoft Hekaton](microsoft-hekaton.md)** + +- **[VoltDB](voltdb.md)** + +- **[MemSQL](memsql.md)** + + diff --git a/content/en/docs/Developerguide/concepts.md b/content/en/docs/Developerguide/concepts.md new file mode 100644 index 000000000..ad658908b --- /dev/null +++ b/content/en/docs/Developerguide/concepts.md @@ -0,0 +1,22 @@ +# Concepts + +## Database + +Databases manage various data objects and are isolated from each other. While creating a database, you can specify a tablespace. If you do not specify it, the object will be saved to the **PG\_DEFAULT** tablespace by default. Objects managed by a database can be distributed to multiple tablespaces. + +## Tablespace + +In openGauss, a tablespace is a directory storing physical files of the databases the tablespace contains. Multiple tablespaces can coexist. Files are physically isolated using tablespaces and managed by a file system. + +## Schema + +openGauss schemas logically separate databases. All database objects are created under certain schemas. In openGauss, schemas and users are loosely bound. When you create a user, a schema with the same name as the user will be created automatically. You can also create a schema or specify another schema. + +## User and Role + +openGauss uses users and roles to control the access to databases. A role can be a database user or a group of database users, depending on role settings. In openGauss, the difference between roles and users is that a role does not have the **LOGIN** permission by default. In openGauss, one user can have only one role, but you can put a user's role under a parent role to grant multiple permissions to the user. + +## Transaction + +In openGauss, transactions are managed by multi-version concurrency control \(MVCC\) and two-phase locking \(2PL\). It enables smooth data reads and writes. openGauss stores them together with the version of the current tuple. A VACUUM thread is introduced to periodically clear historical version data. Unless in performance optimization, you do not need to pay attention to the **VACUUM** process. In addition, openGauss automatically commits transactions. + diff --git a/content/en/docs/Developerguide/concurrency-control-mechanism.md b/content/en/docs/Developerguide/concurrency-control-mechanism.md new file mode 100644 index 000000000..6e7dab7cd --- /dev/null +++ b/content/en/docs/Developerguide/concurrency-control-mechanism.md @@ -0,0 +1,48 @@ +# Concurrency Control Mechanism + +After investing extensive research to find the best concurrency control mechanism, we concluded that SILO[\[9\]](en-us_topic_0257713290.md#_ftn9)-based on OCC is the best ACID-compliant OCC algorithm for MOT. SILO provides the best foundation for MOT’s challenging requirements. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>MOT is fully Atomicity, Consistency, Isolation, Durability \(ACID\)-compliant, as described in the ++ section. + +## Local and Global MOT Memory + +SILO manages both a local memory and a global memory, as shown in -[Global and Local](memory-planning.md#en-us_topic_0257713337_section31231454125312). 
+ +- **Global** memory is long-term shared memory is shared by all cores and is used primarily to store all the table data and indexes + +- **Local** memory is short-term memory that is used primarily by sessions for handling transactions and store data changes in a primate to transaction memory until the commit phase. + +When a transaction change is required, SILO handles the copying of all that transaction’s data from the global memory into the local memory. Minimal locks are placed on the global memory according to the OCC approach, so that the contention time in the global shared memory is extremely minimal. After the transaction’ change has been completed, this data is pushed back from the local memory to the global memory. + +The basic interactive transactional flow with our SILO-enhanced concurrency control is shown in the figure below. + +**Figure 1** Private \(Local\) Memory \(for each transaction\) and a Global Memory \(for all the transactions of all the cores\) +![](figures/private-(local)-memory-(for-each-transaction)-and-a-global-memory-(for-all-the-transactions-of-all-t.png "private-(local)-memory-(for-each-transaction)-and-a-global-memory-(for-all-the-transactions-of-all-t") + +For more details, refer to the vldb document. + +H. Avni at al. Industrial-Strength OLTP Using Main Memory and Many-cores, VLDB 2020. + +## SILO Enhancements for MOT + +SILO in its basic algorithm flow outperformed many other ACID-compliant OCCs that we tested in our research experiments. However, in order to make it a product-grade mechanism, we had to enhance it with many essential functionalities that were missing in the original design, such as: + +- Added support for interactive mode transactions, where transactions are running SQL by SQL from the client side and not as a single step on the server side +- Added optimistic inserts +- Added support for non-unique indexes +- Added support for read-after-write in-transaction so users can see their own changes before they are committed +- Added support for lockless cooperative garbage collection +- Added support for lockless checkpoints +- Added support for fast recovery +- Added support for two-phase commit in a distributed deployment + +Adding these enhancements without breaking the scalable characteristic of the original SILO was very challenging. + +- **[Isolation Levels](isolation-levels.md)** + +- **[Optimistic Concurrency Control](optimistic-concurrency-control.md)** + +- **[OCC vs 2PL differences by example](occ-vs-2pl-differences-by-example.md)** + + diff --git a/content/en/docs/Developerguide/concurrent-data-import-and-queries.md b/content/en/docs/Developerguide/concurrent-data-import-and-queries.md new file mode 100644 index 000000000..dc0efc9a3 --- /dev/null +++ b/content/en/docs/Developerguide/concurrent-data-import-and-queries.md @@ -0,0 +1,33 @@ +# Concurrent Data Import and Queries + +Transaction T1: + +``` +START TRANSACTION; +COPY test FROM '...'; +COMMIT; +``` + +Transaction T2: + +``` +START TRANSACTION; +SELECT * FROM test; +COMMIT; +``` + +Scenario 1: + +T1 is started but not committed. At this time, T2 is started. **COPY** of T1 and then **SELECT** of T2 starts, and both of them succeed. In this case, T2 cannot see the data added by **COPY** of T1. + +Scenario 2: + +- **READ COMMITTED** level + + T1 is started but not committed. At this time, T2 is started. **COPY** of T1 is complete and T1 is committed. In this case, T2 can see the data added by **COPY** of T1. 
+ +- **REPEATABLE READ** level + + T1 is started but not committed. At this time, T2 is started. **COPY** of T1 is complete and T1 is committed. In this case, T2 cannot see the data added by **COPY** of T1. + + diff --git a/content/en/docs/Developerguide/concurrent-insert-and-delete-in-the-same-table.md b/content/en/docs/Developerguide/concurrent-insert-and-delete-in-the-same-table.md new file mode 100644 index 000000000..396278243 --- /dev/null +++ b/content/en/docs/Developerguide/concurrent-insert-and-delete-in-the-same-table.md @@ -0,0 +1,33 @@ +# Concurrent INSERT and DELETE in the Same Table + +Transaction T1: + +``` +START TRANSACTION; +INSERT INTO test VALUES(1,'test1','test123'); +COMMIT; +``` + +Transaction T2: + +``` +START TRANSACTION; +DELETE test WHERE NAME='test1'; +COMMIT; +``` + +Scenario 1: + +T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, **DELETE** of T2 is performed. In this case, **DELETE 0** is displayed, because T1 is not committed and T2 cannot see the data inserted by T1. + +Scenario 2: + +- **READ COMMITTED** level + + T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed and **DELETE** of T2 is executed. In this case, **DELETE 1** is displayed, because T2 can see the data inserted by T1. + +- **REPEATABLE READ** level + + T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed and **DELETE** of T2 is executed. In this case, **DELETE 0** is displayed, because the data obtained in queries is consistent in a transaction. + + diff --git a/content/en/docs/Developerguide/concurrent-insert-in-the-same-table.md b/content/en/docs/Developerguide/concurrent-insert-in-the-same-table.md new file mode 100644 index 000000000..06e8e5662 --- /dev/null +++ b/content/en/docs/Developerguide/concurrent-insert-in-the-same-table.md @@ -0,0 +1,33 @@ +# Concurrent INSERT in the Same table + +Transaction T1: + +``` +START TRANSACTION; +INSERT INTO test VALUES(2,'test2','test123'); +COMMIT; +``` + +Transaction T2: + +``` +START TRANSACTION; +INSERT INTO test VALUES(3,'test3','test123'); +COMMIT; +``` + +Scenario 1: + +T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, **INSERT** of T2 is executed and succeeds. At the **READ COMMITTED** and **REPEATABLE READ** levels, the **SELECT** statement of T1 cannot see data inserted by T2, and a query in T2 cannot see data inserted by T1. + +Scenario 2: + +- **READ COMMITTED** level + + T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed. In T2, a query executed after **INSERT** can see the data inserted by T1. + +- **REPEATABLE READ** level + + T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed. In T2, a query executed after **INSERT** cannot see the data inserted by T1. 
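
The **READ COMMITTED** and **REPEATABLE READ** cases above differ only in the isolation level under which the second transaction runs. A minimal sketch of starting a transaction with an explicit isolation level (openGauss defaults to READ COMMITTED unless the session or transaction specifies otherwise):

```
-- Run the querying transaction at the READ COMMITTED level.
START TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT * FROM test;
COMMIT;

-- Run the querying transaction at the REPEATABLE READ level.
START TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT * FROM test;
COMMIT;
```
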
+ + diff --git a/content/en/docs/Developerguide/concurrent-update-in-the-same-table.md b/content/en/docs/Developerguide/concurrent-update-in-the-same-table.md new file mode 100644 index 000000000..8352a593f --- /dev/null +++ b/content/en/docs/Developerguide/concurrent-update-in-the-same-table.md @@ -0,0 +1,34 @@ +# Concurrent UPDATE in the Same Table + +Transaction T1: + +``` +START TRANSACTION; +UPDATE test SET address='test1234' WHERE name='test1'; +COMMIT; +``` + +Transaction T2: + +``` +START TRANSACTION; +UPDATE test SET address='test1234' WHERE name='test2'; +COMMIT; +``` + +Transaction T3: + +``` +START TRANSACTION; +UPDATE test SET address='test1234' WHERE name='test1'; +COMMIT; +``` + +Scenario 1: + +T1 is started but not committed. At this time, T2 is started. **UPDATE** of T1 and then T2 starts, and both of them succeed. This is because the **UPDATE** operations use row-level locks and do not conflict when they update different rows. + +Scenario 2: + +T1 is started but not committed. At this time, T3 is started. **UPDATE** of T1 and then T3 starts, and **UPDATE** of T1 succeeds. **UPDATE** of T3 times out. This is because T1 and T3 update the same row and the lock is held by T1 at the time of the update. + diff --git a/content/en/docs/Developerguide/concurrent-write-examples.md b/content/en/docs/Developerguide/concurrent-write-examples.md new file mode 100644 index 000000000..abc7baa22 --- /dev/null +++ b/content/en/docs/Developerguide/concurrent-write-examples.md @@ -0,0 +1,17 @@ +# Concurrent Write Examples + +This section uses the **test** table as an example to describe how to perform concurrent **INSERT** and **DELETE** in the same table, concurrent **INSERT** in the same table, concurrent **UPDATE** in the same table, and concurrent import and queries. + +``` +CREATE TABLE test(id int, name char(50), address varchar(255)); +``` + +- **[Concurrent INSERT and DELETE in the Same Table](concurrent-insert-and-delete-in-the-same-table.md)** + +- **[Concurrent INSERT in the Same table](concurrent-insert-in-the-same-table.md)** + +- **[Concurrent UPDATE in the Same Table](concurrent-update-in-the-same-table.md)** + +- **[Concurrent Data Import and Queries](concurrent-data-import-and-queries.md)** + + diff --git a/content/en/docs/Developerguide/condition-expressions.md b/content/en/docs/Developerguide/condition-expressions.md new file mode 100644 index 000000000..1caac0ea7 --- /dev/null +++ b/content/en/docs/Developerguide/condition-expressions.md @@ -0,0 +1,214 @@ +# Condition Expressions + +Data that meets the requirements specified by conditional expressions are filtered during SQL statement execution. + +Conditional expressions include the following types: + +- CASE + + **CASE** expressions are similar to the **CASE** statements in other coding languages. + + [Figure 1](#en-us_topic_0237122002_en-us_topic_0059777797_f6defc8307fd0434380b6ba22838ed5f1) shows the syntax of a **CASE** expression. + + **Figure 1** case::= + ![](figures/case.jpg "case") + + A **CASE** clause can be used in a valid expression. **condition** is an expression that returns a value of Boolean type. + + - If the result is **true**, the result of the **CASE** expression is the required result. + - If the result is false, the following **WHEN** or **ELSE** clauses are processed in the same way. + - If every **WHEN condition** is false, the result of the expression is the result of the **ELSE** clause. If the **ELSE** clause is omitted and has no match condition, the result is NULL. 
+ + Example: + + ``` + postgres=# CREATE TABLE tpcds.case_when_t1(CW_COL1 INT); + + postgres=# INSERT INTO tpcds.case_when_t1 VALUES (1), (2), (3); + + postgres=# SELECT * FROM tpcds.case_when_t1; + a + --- + 1 + 2 + 3 + (3 rows) + + postgres=# SELECT CW_COL1, CASE WHEN CW_COL1=1 THEN 'one' WHEN CW_COL1=2 THEN 'two' ELSE 'other' END FROM tpcds.case_when_t1 ORDER BY 1; + cw_col1 | case + ---------+------- + 1 | one + 2 | two + 3 | other + (3 rows) + + postgres=# DROP TABLE tpcds.case_when_t1; + ``` + +- DECODE + + [Figure 2](#en-us_topic_0237122002_en-us_topic_0059777797_f8e62b15fa92349339fcdb77fcc5fef4d) shows the syntax of a **DECODE** expression. + + **Figure 2** decode::= + ![](figures/decode.png "decode") + + Compare each following **compare\(n\)** with **base\_expr**, **value\(n\)** is returned if a **compare\(n\)** matches the **base\_expr** expression. If base\_expr does not match each **compare\(n\)**, the default value is returned. + + [Conditional Expression Functions](conditional-expression-functions.md) describes the examples. + + ``` + postgres=# SELECT DECODE('A','A',1,'B',2,0); + case + ------ + 1 + (1 row) + ``` + +- COALESCE + + [Figure 3](#en-us_topic_0237122002_en-us_topic_0059777797_f1877c9f8d2ac4964828a6eaaddf5f35f) shows the syntax of a **COALESCE** expression. + + **Figure 3** coalesce::= + ![](figures/coalesce.png "coalesce") + + **COALESCE** returns its first not-**NULL** value. If all the parameters are **NULL**, **COALESCE** will return **NULL**. This value is replaced by the default value when data is displayed. Like a **CASE** expression, **COALESCE** only evaluates the parameters that are needed to determine the result. That is, parameters to the right of the first not-**NULL** parameter are not evaluated. + + Example + + ``` + postgres=# CREATE TABLE tpcds.c_tabl(description varchar(10), short_description varchar(10), last_value varchar(10)) + ; + + postgres=# INSERT INTO tpcds.c_tabl VALUES('abc', 'efg', '123'); + postgres=# INSERT INTO tpcds.c_tabl VALUES(NULL, 'efg', '123'); + + postgres=# INSERT INTO tpcds.c_tabl VALUES(NULL, NULL, '123'); + + postgres=# SELECT description, short_description, last_value, COALESCE(description, short_description, last_value) FROM tpcds.c_tabl ORDER BY 1, 2, 3, 4; + description | short_description | last_value | coalesce + -------------+-------------------+------------+---------- + abc | efg | 123 | abc + | efg | 123 | efg + | | 123 | 123 + (3 rows) + + postgres=# DROP TABLE tpcds.c_tabl; + ``` + + If **description** is not **NULL**, the value of **description** is returned. Otherwise, parameter **short\_description** is calculated. If **short\_description** is not **NULL**, the value of **short\_description** is returned. Otherwise, parameter **last\_value** is calculated. If **last\_value** is not **NULL**, the value of **last\_value** is returned. Otherwise, **none** is returned. + + ``` + postgres=# SELECT COALESCE(NULL,'Hello World'); + coalesce + --------------- + Hello World + (1 row) + ``` + +- NULLIF + + [Figure 4](#en-us_topic_0237122002_en-us_topic_0059777797_f6c5bc64bf5de4b728ed1d73d97768e6e) shows the syntax of a **NULLIF** expression. + + **Figure 4** nullif::= + ![](figures/nullif.png "nullif") + + Only if **value1** is equal to **value2** can **NULLIF** return the **NULL** value. Otherwise, **value1** is returned. 
+ + Example: + + ``` + postgres=# CREATE TABLE tpcds.null_if_t1 ( + NI_VALUE1 VARCHAR(10), + NI_VALUE2 VARCHAR(10) + ); + + postgres=# INSERT INTO tpcds.null_if_t1 VALUES('abc', 'abc'); + postgres=# INSERT INTO tpcds.null_if_t1 VALUES('abc', 'efg'); + + postgres=# SELECT NI_VALUE1, NI_VALUE2, NULLIF(NI_VALUE1, NI_VALUE2) FROM tpcds.null_if_t1 ORDER BY 1, 2, 3; + + ni_value1 | ni_value2 | nullif + -----------+-----------+-------- + abc | abc | + abc | efg | abc + (2 rows) + postgres=# DROP TABLE tpcds.null_if_t1; + ``` + + If **value1** is equal to **value2**, **NULL** is returned. Otherwise, **value1** is returned. + + ``` + postgres=# SELECT NULLIF('Hello','Hello World'); + nullif + -------- + Hello + (1 row) + ``` + +- GREATEST \(maximum value\) and LEAST \(minimum value\) + + [Figure 5](#en-us_topic_0237122002_en-us_topic_0059777797_f23a83b0f987a49e0b6890280568afbd2) shows the syntax of a **GREATEST** expression. + + **Figure 5** greatest::= + ![](figures/greatest.png "greatest") + + You can select the maximum value from any numerical expression list. + + ``` + postgres=# SELECT greatest(9000,155555,2.01); + greatest + ---------- + 155555 + (1 row) + ``` + + [Figure 6](#en-us_topic_0237122002_en-us_topic_0059777797_f30a16b0edbde4750a42053619840b384) shows the syntax of a **LEAST** expression. + + **Figure 6** least::= + ![](figures/least.png "least") + + You can select the minimum value from any numerical expression list. + + Each of the preceding numeric expressions can be converted into a common data type, which will be the data type of the result. + + The NULL values in the list will be ignored. The result is **NULL** only if the results of all expressions are **NULL**. + + ``` + postgres=# SELECT least(9000,2); + least + ------- + 2 + (1 row) + ``` + + [Conditional Expression Functions](conditional-expression-functions.md) describes the examples. + +- NVL + + [Figure 7](#en-us_topic_0237122002_en-us_topic_0059777797_f69cd4e01dd6e4280b756eb98d3c77c91) shows the syntax of an **NVL** expression. + + **Figure 7** nvl::= + ![](figures/nvl.jpg "nvl") + + If the value of **value1** is **NULL**, **value2** is returned. Otherwise, **value1** is returned. + + Example: + + ``` + postgres=# SELECT nvl(null,1); + NVL + ----- + 1 + (1 row) + + ``` + + ``` + postgres=# SELECT nvl ('Hello World' ,1); + nvl + --------------- + Hello World + (1 row) + ``` + + diff --git a/content/en/docs/Developerguide/conditional-expression-functions.md b/content/en/docs/Developerguide/conditional-expression-functions.md new file mode 100644 index 000000000..f5987676b --- /dev/null +++ b/content/en/docs/Developerguide/conditional-expression-functions.md @@ -0,0 +1,171 @@ +# Conditional Expression Functions + +## Conditional Expression Functions + +- coalesce\(expr1, expr2, ..., exprn\) + + Description: + + Returns the first of its arguments that are not null. + + **COALESCE\(expr1, expr2\)** is equivalent to **CASE WHEN expr1 IS NOT NULL THEN expr1 ELSE expr2 END**. + + Example: + + ``` + postgres=# SELECT coalesce(NULL,'hello'); + coalesce + ---------- + hello + (1 row) + ``` + + Note: + + - Null is returned only if all parameters are null. + - This value is replaced by the default value when data is displayed. + - Like a **CASE** expression, **COALESCE** only evaluates the parameters that are needed to determine the result. That is, parameters to the right of the first not-**NULL** parameter are not evaluated. + +- decode\(base\_expr, compare1, value1, Compare2,value2, ... 
default\) + + Description: Compares **base\_expr** with each **compare\(n\)** and **returns value\(n\)** if they are matched. If **base\_expr** does not match each **compare\(n\)**, the default value is returned. + + Example: + + ``` + postgres=# SELECT decode('A','A',1,'B',2,0); + case + ------ + 1 + (1 row) + ``` + +- nullif\(expr1, expr2\) + + Description: Returns **NULL** only when **expr1** is equal to **expr2**. Otherwise, **expr1** is returned. + + **nullif\(expr1, expr2\)** is equivalent to **CASE WHEN expr1 = expr2 THEN NULL ELSE expr1 END**. + + Example: + + ``` + postgres=# SELECT nullif('hello','world'); + nullif + -------- + hello + (1 row) + ``` + + Note: + + Assume the two parameter data types are different: + + - If implicit conversion exists between the two data types, implicitly convert the parameter of lower priority to this data type using the data type of higher priority. If the conversion succeeds, computation is performed. Otherwise, an error is returned. For example: + + ``` + postgres=# SELECT nullif('1234'::VARCHAR,123::INT4); + nullif + -------- + 1234 + (1 row) + ``` + + ``` + postgres=# SELECT nullif('1234'::VARCHAR,'2012-12-24'::DATE); + ERROR: invalid input syntax for type timestamp: "1234" + ``` + + - If implicit conversion is not applied between two data types, an error is displayed. For example: + + ``` + postgres=# SELECT nullif(TRUE::BOOLEAN,'2012-12-24'::DATE); + ERROR: operator does not exist: boolean = timestamp without time zone + LINE 1: SELECT nullif(TRUE::BOOLEAN,'2012-12-24'::DATE) FROM DUAL; + ^ + HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. + ``` + + +- nvl\( expr1 , expr2 \) + + Description: + + - If **expr1** is **NULL**, **expr2** is returned. + - If **expr1** is not **NULL**, **expr1** is returned. + + Example: + + ``` + postgres=# SELECT nvl('hello','world'); + nvl + ------- + hello + (1 row) + ``` + + Parameters **expr1** and **expr2** can be of any data type. If **expr1** and **expr2** are of different data types, NVL checks whether **expr2** can be implicitly converted to **expr1**. If it can, the **expr1** data type is returned. If **epr2** cannot be implicitly converted to **expr1** but **epr1** can be implicitly converted to **expr2**, the **expr2** data type is returned. If no implicit type conversion exists between the two parameters and the parameters are different data types, an error is reported. + +- greatest\(expr1 \[, ...\]\) + + Description: Selects the largest value from a list of any number of expressions. + + Return type: + + Example: + + ``` + postgres=# SELECT greatest(1*2,2-3,4-1); + greatest + ---------- + 3 + (1 row) + ``` + + ``` + postgres=# SELECT greatest('HARRY', 'HARRIOT', 'HAROLD'); + greatest + ---------- + HARRY + (1 row) + ``` + +- least\(expr1 \[, ...\]\) + + Description: Selects the smallest value from a list of any number of expressions. + + Example: + + ``` + postgres=# SELECT least(1*2,2-3,4-1); + least + ------- + -1 + (1 row) + ``` + + ``` + postgres=# SELECT least('HARRY','HARRIOT','HAROLD'); + least + -------- + HAROLD + (1 row) + ``` + +- EMPTY\_BLOB\(\) + + Description: Initiates a BLOB variable in an **INSERT** or an **UPDATE** statement to a **NULL** value. + + Return type: BLOB + + Example: + + ``` + -- Create a table. + postgres=# CREATE TABLE blob_tb(b blob,id int); + -- Insert data. + postgres=# INSERT INTO date_tb VALUES (empty_blob(),1); + --Delete the table. 
+ postgres=# DROP TABLE blob_tb; + ``` + + diff --git a/content/en/docs/Developerguide/conditional-statements.md b/content/en/docs/Developerguide/conditional-statements.md new file mode 100644 index 000000000..3993932dc --- /dev/null +++ b/content/en/docs/Developerguide/conditional-statements.md @@ -0,0 +1,102 @@ +# Conditional Statements + +Conditional statements are used to decide whether given conditions are met. Operations are executed based on the decisions made. + +openGauss supports five usages of **IF**: + +- IF\_THEN + + **Figure 1** IF\_THEN::= + ![](figures/if_then.jpg "if_then") + + **IF\_THEN** is the simplest form of **IF**. If the condition is true, statements are executed. If it is false, they are skipped. + + Example: + + ``` + postgres=# IF v_user_id <> 0 THEN + UPDATE users SET email = v_email WHERE user_id = v_user_id; + END IF; + ``` + +- IF\_THEN\_ELSE + + **Figure 2** IF\_THEN\_ELSE::= + ![](figures/if_then_else.jpg "if_then_else") + + **IF-THEN-ELSE** statements add **ELSE** branches and can be executed if the condition is false. + + Example: + + ``` + postgres=# IF parentid IS NULL OR parentid = '' + THEN + RETURN; + ELSE + hp_true_filename(parentid); -- Call the stored procedure. + END IF; + ``` + +- IF\_THEN\_ELSE IF + + **IF** statements can be nested in the following way: + + ``` + postgres=# IF sex = 'm' THEN + pretty_sex := 'man'; + ELSE + IF sex = 'f' THEN + pretty_sex := 'woman'; + END IF; + END IF; + ``` + + Actually, this is a way of an **IF** statement nesting in the **ELSE** part of another **IF** statement. Therefore, an **END IF** statement is required for each nesting **IF** statement and another **END IF** statement is required to end the parent **IF-ELSE** statement. To set multiple options, use the following form: + +- IF\_THEN\_ELSIF\_ELSE + + **Figure 3** IF\_THEN\_ELSIF\_ELSE::= + ![](figures/if_then_elsif_else.png "if_then_elsif_else") + + Example: + + ``` + IF number_tmp = 0 THEN + result := 'zero'; + ELSIF number_tmp > 0 THEN + result := 'positive'; + ELSIF number_tmp < 0 THEN + result := 'negative'; + ELSE + result := 'NULL'; + END IF; + ``` + +- IF\_THEN\_ELSEIF\_ELSE + + **ELSEIF** is an alias of **ELSIF**. + + Example: + + ``` + CREATE OR REPLACE PROCEDURE proc_control_structure(i in integer) + AS + BEGIN + IF i > 0 THEN + raise info 'i:% is greater than 0. ',i; + ELSIF i < 0 THEN + raise info 'i:% is smaller than 0. ',i; + ELSE + raise info 'i:% is equal to 0. ',i; + END IF; + RETURN; + END; + / + + CALL proc_control_structure(3); + + -- Delete the stored procedure. + DROP PROCEDURE proc_control_structure; + ``` + + diff --git a/content/en/docs/Developerguide/config_settings.md b/content/en/docs/Developerguide/config_settings.md new file mode 100644 index 000000000..fcaefd1a2 --- /dev/null +++ b/content/en/docs/Developerguide/config_settings.md @@ -0,0 +1,130 @@ +# CONFIG\_SETTINGS + +**CONFIG\_SETTINGS** displays information about parameters of the running database. + +**Table 1** CONFIG\_SETTINGS columns + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Name | Type | Description |
| --- | --- | --- |
| name | text | Parameter name |
| setting | text | Current parameter value |
| unit | text | Implicit unit of the parameter |
| category | text | Logical group of the parameter |
| short_desc | text | Brief description of the parameter |
| extra_desc | text | Detailed description of the parameter |
| context | text | Context required to set the parameter value, including internal, postmaster, sighup, backend, superuser, and user |
| vartype | text | Parameter type, including bool, enum, integer, real, or string |
| source | text | Method of assigning the parameter value |
| min_val | text | Minimum value of the parameter. If the parameter type is not numeric, the value of this column is null. |
| max_val | text | Maximum value of the parameter. If the parameter type is not numeric, the value of this column is null. |
| enumvals | text[] | Valid values of an enum-type parameter. If the parameter type is not enum, the value of this column is null. |
| boot_val | text | Default parameter value used upon database startup |
| reset_val | text | Default parameter value used upon database reset |
| sourcefile | text | Configuration file used to set parameter values. If parameter values are not configured using the configuration file, the value of this column is null. |
| sourceline | integer | Row number in the configuration file for setting parameter values. If parameter values are not configured using the configuration file, the value of this column is null. |
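
For example, the view can be queried like any other system view. The sketch below assumes it is accessed through the DBE_PERF schema, as is typical for openGauss performance views; adjust the schema qualification if your installation exposes it elsewhere.

```
-- List a few parameters together with their current and boot-time values.
postgres=# SELECT name, setting, boot_val FROM dbe_perf.config_settings WHERE name IN ('max_connections', 'port');
```
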
+ diff --git a/content/en/docs/Developerguide/configuration-examples.md b/content/en/docs/Developerguide/configuration-examples.md new file mode 100644 index 000000000..c2f1ddc54 --- /dev/null +++ b/content/en/docs/Developerguide/configuration-examples.md @@ -0,0 +1,111 @@ +# Configuration Examples + +Text search configuration specifies the following components required for converting a document into a **tsvector**: + +- A parser, decomposes a text into tokens. +- Dictionary list, converts each token into a lexeme. + +Each time when the **to\_tsvector** or **to\_tsquery** function is invoked, a text search configuration is required to specify a processing procedure. The GUC parameter [default\_text\_search\_config](zone-and-formatting.md#en-us_topic_0237124733_en-us_topic_0059778109_sd9a07d429cd4498383931c621742b816) specifies the default text search configuration, which will be used if the text search function does not explicitly specify a text search configuration. + +openGauss provides some predefined text search configurations. You can also create user-defined text search configurations. In addition, to facilitate the management of text search objects, multiple **gsql** meta-commands are provided to display information about text search objects. For details, see "Client Tool \> Meta-Command Reference" in _Tool Reference_. + +## Procedure + +1. Create a text search configuration **ts\_conf** by copying the predefined text search configuration **english**. + + ``` + postgres=# CREATE TEXT SEARCH CONFIGURATION ts_conf ( COPY = pg_catalog.english ); + CREATE TEXT SEARCH CONFIGURATION + ``` + +2. Create a **Synonym** dictionary. + + Assume that the definition file **pg\_dict.syn** of the **Synonym** dictionary contains the following contents: + + ``` + postgres pg + pgsql pg + postgresql pg + ``` + + Run the following statement to create the **Synonym** dictionary: + + ``` + postgres=# CREATE TEXT SEARCH DICTIONARY pg_dict ( + TEMPLATE = synonym, + SYNONYMS = pg_dict, + FILEPATH = 'file:///home/dicts' + ); + ``` + +3. Create an **Ispell** dictionary **english\_ispell** \(the dictionary definition file is from the open source dictionary\). + + ``` + postgres=# CREATE TEXT SEARCH DICTIONARY english_ispell ( + TEMPLATE = ispell, + DictFile = english, + AffFile = english, + StopWords = english, + FILEPATH = 'file:///home/dicts' + ); + ``` + +4. Modify the text search configuration **ts\_conf** and change the dictionary list for tokens of certain types. For details about token types, see [Parser](parser.md). + + ``` + postgres=# ALTER TEXT SEARCH CONFIGURATION ts_conf + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart, + word, hword, hword_part + WITH pg_dict, english_ispell, english_stem; + ``` + +5. In the text search configuration, set non-index or set the search for tokens of certain types. + + ``` + postgres=# ALTER TEXT SEARCH CONFIGURATION ts_conf + DROP MAPPING FOR email, url, url_path, sfloat, float; + ``` + +6. Use the text retrieval commissioning function ts\_debug\(\) to test the text search configuration **ts\_conf**. + + ``` + postgres=# SELECT * FROM ts_debug('ts_conf', ' + PostgreSQL, the highly scalable, SQL compliant, open source object-relational + database management system, is now undergoing beta testing of the next + version of our software. + '); + ``` + +7. You can set the default text search configuration of the current session to **ts\_conf**. This setting is valid only for the current session. 
+ + ``` + postgres=# \dF+ ts_conf + Text search configuration "public.ts_conf" + Parser: "pg_catalog.default" + Token | Dictionaries + -----------------+------------------------------------- + asciihword | pg_dict,english_ispell,english_stem + asciiword | pg_dict,english_ispell,english_stem + file | simple + host | simple + hword | pg_dict,english_ispell,english_stem + hword_asciipart | pg_dict,english_ispell,english_stem + hword_numpart | simple + hword_part | pg_dict,english_ispell,english_stem + int | simple + numhword | simple + numword | simple + uint | simple + version | simple + word | pg_dict,english_ispell,english_stem + + postgres=# SET default_text_search_config = 'public.ts_conf'; + SET + postgres=# SHOW default_text_search_config; + default_text_search_config + ---------------------------- + public.ts_conf + (1 row) + ``` + + diff --git a/content/en/docs/Developerguide/configuration-file-reference.md b/content/en/docs/Developerguide/configuration-file-reference.md new file mode 100644 index 000000000..1cd6bc2a4 --- /dev/null +++ b/content/en/docs/Developerguide/configuration-file-reference.md @@ -0,0 +1,133 @@ +# Configuration File Reference + +**Table 1** Parameter description + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Parameter

+

Description

+

Value Range

+

local

+

Indicates that this record accepts only Unix-domain socket connections. If no record of this type exists, Unix-domain socket connections are not allowed.

+

When gsql is used to initiate a connection from a local server and the -U parameter is not specified, a Unix-domain-socket connection is established.

+

-

+

host

+

Indicates that this record accepts either a common TCP/IP-socket connection or a TCP/IP-socket connection encrypted through SSL.

+

-

+

hostssl

+

Indicates that this record accepts only a TCP/IP socket connection encrypted through SSL.

+

For the connection encrypted through SSL, you need to apply for a digital certificate and configure related parameters. For details, see Establishing Secure TCP/IP Connections in SSL Mode.

+

hostnossl

+

Indicates that this record accepts only a common TCP/IP socket connection.

+

-

+

DATABASEGUC

+

Databases that this record matches and controls access to

+
  • all: indicates that this record matches all databases.
  • sameuser: indicates that the database must have the same name as the user who requests database access.
  • samerole: indicates that this record matches a database if the user who requests the database is a member of a role having the same name as the database.
  • samegroup: same as samerole; indicates that this record matches a database if the user who requests the database is a member of a role having the same name as the database.
  • A file containing database names, with an at sign (@) added before the file name. Databases in the file are separated by commas (,) or line feeds.
  • A specific database name or a list of databases separated by commas (,)
    NOTE:

    replication indicates that the record matches a requested replication connection, but this does not mean the record matches any specific database. To use a database named replication, specify it in the database column.

    +
    +
+

USER

+

Users who match the record and are allowed to access databases

+
  • all: indicates that this record matches all users.
  • +User role: indicates that this record matches all members that directly or indirectly belong to the role.
    NOTE:

    + is a prefix character.

    +
    +
  • A file containing usernames, with an at sign (@) added before the file name. Users in the file are separated by commas (,) or line feeds.
  • A specific database username or a list of users separated by commas (,)
+

ADDRESS

+

Range of client IP addresses that this record matches

+

IPv4 and IPv6 are supported. The IP address range can be expressed in the following two formats:

+
  • IP address/mask length. Example: 10.10.0.0/24
  • IP address and subnet mask. Example: 10.10.0.0 255.255.255.0
+
NOTE:

An IPv4 address matches the IPv6 connection with the corresponding address. For example, 127.0.0.1 matches IPv6 address ::ffff:127.0.0.1.

+
+

METHOD

+

Authentication method used for connection

+

The following authentication modes are supported. For details, see Table 2.

+
  • trust
  • reject
  • md5 (not recommended and not supported by default. This authentication mode can be configured using the password_encryption_type parameter.)
  • sha256
  • cert
  • gss (only for authentication within openGauss)
+
+ +**Table 2** Authentication modes + + + + + + + + + + + + + + + + + + + + + + + + + +

Authentication Mode

+

Remarks

+

trust

+

In trust mode, only connections initiated from the local server using gsql without the -U parameter are trusted. In this case, no password is required.

+

The trust authentication mode applies to local connections on a single-user workstation, but not on a multi-user workstation. To use trust authentication, you can use file system permissions to control access to the Unix-domain socket file on the server. You can use either of the following methods to control the access:

+ +
NOTICE:

Setting the file system permission restricts only Unix-domain socket connections and does not affect local TCP/IP connections. To ensure local TCP/IP security, openGauss does not allow trust authentication for remote connections.

+
+

reject

+

Rejects connections unconditionally. This authentication mode is usually used to filter out certain hosts.

+

md5

+

Requires the client to provide an MD5-encrypted password for authentication.

+
NOTICE:

This authentication method is not recommended because MD5 is not a secure encryption algorithm and may cause network risks. openGauss retains MD5 authentication and password storage to facilitate use of third-party tools (such as the TPCC test tool).

+
+

sha256

+

Requires the client to provide a SHA-256-encrypted password for authentication. During transmission, the password is protected by a one-way SHA-256 hash combined with a salt (a random number sent from the server to the client), enhancing security.

+

cert

+

Client certificate authentication mode. In this mode, the SSL connection must be configured and the client must provide a valid SSL certificate. The user password is not required.

+
NOTICE:

This authentication mode supports only hostssl rules.

+
+

gss

+

Uses the GSSAPI-based Kerberos authentication.

+
NOTICE:
  • This authentication mode depends on components such as the Kerberos server. It supports only authentication for communication within openGauss. In the current version, Kerberos authentication cannot be used to connect to external clients.
  • Enabling Kerberos authentication within openGauss slows down the connection setup among nodes in openGauss. The performance of SQL operations during the setup is affected, but later operations are not.
+
+
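To show how the fields described above fit together, the following is a minimal, hypothetical **pg\_hba.conf** sketch. The IP addresses, username, and authentication methods are placeholders and must be adapted to your environment.

```
# TYPE     DATABASE   USER    ADDRESS          METHOD
# Unix domain socket connections from the local server.
local      all        all                      trust
# User jack may connect to any database from a single remote host; passwords are verified with SHA-256.
host       all        jack    10.10.0.30/32    sha256
# Any user on this subnet must connect through SSL; non-SSL TCP/IP connections are rejected.
hostssl    all        all     10.10.0.0/24     sha256
hostnossl  all        all     10.10.0.0/24     reject
```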
+ diff --git a/content/en/docs/Developerguide/configuration-settings-functions.md b/content/en/docs/Developerguide/configuration-settings-functions.md new file mode 100644 index 000000000..cf760c95e --- /dev/null +++ b/content/en/docs/Developerguide/configuration-settings-functions.md @@ -0,0 +1,39 @@ +# Configuration Settings Functions + +Configuration setting functions are used for querying and modifying configuration parameters during running. + +- current\_setting\(setting\_name\) + + Description: Specifies the current setting. + + Return type: text + + Note: **current\_setting** obtains the current setting of **setting\_name** by query. It is equivalent to the **SHOW** statement. For example: + + ``` + postgres=# SELECT current_setting('datestyle'); + + current_setting + ----------------- + ISO, MDY + (1 row) + ``` + +- set\_config\(setting\_name, new\_value, is\_local\) + + Description: Sets the parameter and returns a new value. + + Return type: text + + Note: **set\_config** sets the parameter **setting\_name** to **new\_value**. If **is\_local** is **true**, the new value will only apply to the current transaction. If you want the new value to apply for the current session, use **false** instead. The function corresponds to the **SET** statement. For example: + + ``` + postgres=# SELECT set_config('log_statement_stats', 'off', false); + + set_config + ------------ + off + (1 row) + ``` + + diff --git a/content/en/docs/Developerguide/configuration.md b/content/en/docs/Developerguide/configuration.md new file mode 100644 index 000000000..92fc3738b --- /dev/null +++ b/content/en/docs/Developerguide/configuration.md @@ -0,0 +1,7 @@ +# Configuration + +- **[CONFIG\_SETTINGS](config_settings.md)** + +- **[GLOBAL\_CONFIG\_SETTINGS](global_config_settings.md)** + + diff --git a/content/en/docs/Developerguide/configurations.md b/content/en/docs/Developerguide/configurations.md new file mode 100644 index 000000000..e88b4d8a3 --- /dev/null +++ b/content/en/docs/Developerguide/configurations.md @@ -0,0 +1,15 @@ +# Configurations + +Full text search functionality includes the ability to do many more things: skip indexing certain words \(stop words\), process synonyms, and use sophisticated parsing, for example, parse based on more than just white space. This functionality is controlled by text search configurations. openGauss comes with predefined configurations for many languages, and you can easily create your own configurations. \(The **\\dF** command of **gsql** shows all available configurations.\) + +During installation an appropriate configuration is selected and **default\_text\_search\_config** is set accordingly in **postgresql.conf**. If you are using the same text search configuration for openGauss, you can use the value in **postgresql.conf**. To use different configurations throughout openGauss but the same configuration within any one database, use ALTER DATABASE ... SET. Otherwise, you can set **default\_text\_search\_config** in each session. + +Each text search function that depends on a configuration has an optional argument, so that the configuration to use can be specified explicitly. **default\_text\_search\_config** is used only when this argument is omitted. + +To make it easier to build custom text search configurations, a configuration is built up from simpler database objects. 
openGauss's text search facility provides the following types of configuration-related database objects: + +- Text search parsers break documents into tokens and classify each token \(for example, as words or numbers\). +- Text search dictionaries convert tokens to normalized form and reject stop words. +- Text search templates provide the functions underlying dictionaries. \(A dictionary simply specifies a template and a set of parameters for the template.\) +- Text search configurations select a parser and a set of dictionaries to use to normalize the tokens produced by the parser. + diff --git a/content/en/docs/Developerguide/configuring-a-data-source-in-the-linux-os.md b/content/en/docs/Developerguide/configuring-a-data-source-in-the-linux-os.md new file mode 100644 index 000000000..6383ec35d --- /dev/null +++ b/content/en/docs/Developerguide/configuring-a-data-source-in-the-linux-os.md @@ -0,0 +1,446 @@ +# Configuring a Data Source in the Linux OS + +The ODBC driver \(psqlodbcw.so\) provided by openGauss can be used after it has been configured in a data source. To configure a data source, you must configure the **odbc.ini** and **odbcinst.ini** files on the server. The two files are generated during the unixODBC compilation and installation, and are saved in the **/usr/local/etc** directory by default. + +## Procedure + +1. Obtain the source code package of unixODBC. Try the following link: + + http://sourceforge.net/projects/unixodbc/files/unixODBC/2.3.0/unixODBC-2.3.0.tar.gz/download + +2. Install unixODBC. It does not matter if unixODBC of another version has been installed. + + Currently, unixODBC-2.2.1 is not supported. For example, to install unixODBC-2.3.0, run the commands below. unixODBC is installed in the **/usr/local** directory by default. The data source file is generated in the **/usr/local/etc** directory, and the library file is generated in the **/usr/local/lib** directory. + + ``` + tar zxvf unixODBC-2.3.0.tar.gz + cd unixODBC-2.3.0 + #Open the configure file. If it does not exist, open the configure.ac file. Find LIB_VERSION. + #Change the value of LIB_VERSION to 1:0:0 to compile a *.so.1 dynamic library with the same dependency on psqlodbcw.so. + vim configure + + ./configure --enable-gui=no #To perform compilation on a Kunpeng server, add the configure parameter --build=aarch64-unknown-linux-gnu. + make + #The installation may require root permissions. + make install + ``` + +3. Replace the openGauss driver on the client. + 1. Decompress **openGauss-**_1.0.0_**-ODBC.tar.gz** to the **/usr/local/lib** directory. **psqlodbcw.la** and **psqlodbcw.so** files are obtained. + 2. Copy the library in the **lib** directory obtained after decompressing **openGauss-**_1.0.0_**-ODBC.tar.gz** to the **/usr/local/lib** directory. + +4. Configure a data source. + 1. Configure the ODBC driver file. + + Add the following content to the end of the **/usr/local/etc/odbcinst.ini** file: + + ``` + [GaussMPP] + Driver64=/usr/local/lib/psqlodbcw.so + setup=/usr/local/lib/psqlodbcw.so + ``` + + For descriptions of the parameters in the **odbcinst.ini** file, see [Table 1](#en-us_topic_0237120407_en-us_topic_0059778464_td564f21e7c8e458bbd741b09896f5d91). + + **Table 1** odbcinst.ini configuration parameters + + + + + + + + + + + + + + + + + + + + +

Parameter

+

Description

+

Example

+

[DriverName]

+

Driver name, corresponding to the driver in DSN.

+

[DRIVER_N]

+

Driver64

+

Path of the dynamic driver library.

+

Driver64=/xxx/odbc/lib/psqlodbcw.so

+

setup

+

Driver installation path, which is the same as the dynamic library path in Driver64.

+

setup=/xxx/odbc/lib/psqlodbcw.so

+
+ + 2. Configure the data source file. + + Add the following content to the end of the **/usr/local/etc/odbc.ini** file: + + ``` + [MPPODBC] + Driver=GaussMPP + Servername=10.10.0.13 (database server IP address) + Database=postgres (database name) + Username=omm (database username) + Password= (database user password) + Port=8000 (database listening port) + Sslmode=allow + ``` + + For descriptions of the parameters in the **odbc.ini** file, see [Table 2](#en-us_topic_0237120407_en-us_topic_0059778464_t55845a6555f2454297b64ce47ad3d648). + + **Table 2** odbc.ini configuration parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Parameter

+

Description

+

Example

+

[DSN]

+

Data source name

+

[MPPODBC]

+

Driver

+

Driver name, corresponding to DriverName in odbcinst.ini

+

Driver=DRIVER_N

+

Servername

+

Server IP address

+

Servername=10.145.130.26

+

Database

+

Name of the database to connect

+

Database=postgres

+

Username

+

Database username

+

Username=omm

+

Password

+

Database user password

+

Password=

+
NOTE:

After a user establishes a connection, the ODBC driver automatically clears the password stored in memory.

+

However, if this parameter is configured, UnixODBC will cache data source files, which may cause the password to be stored in the memory for a long time.

+

When connecting from an application, you are advised to pass the password through an API instead of writing it in the data source configuration file. After the connection has been established, immediately clear the memory segment where the password is stored.

+
+

Port

+

Port number of the server

+

Port=8000

+

Sslmode

+

Whether to enable SSL

+

Sslmode=allow

+

UseServerSidePrepare

+

Whether to enable the extended query protocol for the database.

+

The value can be 0 or 1. The default value is 1, indicating that the extended query protocol is enabled.

+

UseServerSidePrepare=1

+

UseBatchProtocol

+

Whether to enable the batch query protocol. If it is enabled, DML performance can be improved. The value can be 0 or 1. The default value is 1.

+

If this parameter is set to 0, the batch query protocol is disabled (mainly for communication with earlier database versions).

+

If this parameter is set to 1 and support_batch_bind is set to on, the batch query protocol is enabled.

+

UseBatchProtocol=1

+

ConnectionExtraInfo

+

Whether to display the driver deployment path and process owner in the connection_info parameter (see connection_info).

+

ConnectionExtraInfo=1

+
NOTE:

The default value is 0. If this parameter is set to 1, the ODBC driver reports the driver deployment path and process owner to the database and displays the information in the connection_info parameter (see connection_info). In this case, you can query the information from PG_STAT_ACTIVITY.

+
+
+ + The valid values of **sslmode** are as follows: + + **Table 3** sslmode options and description + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

sslmode

+

Whether SSL Encryption Is Enabled

+

Description

+

disable

+

No

+

SSL connection is not enabled.

+

allow

+

Possible

+

If the database server requires SSL connection, SSL connection can be enabled. However, authenticity of the database server will not be verified.

+

prefer

+

Possible

+

If the database supports SSL connection, SSL connection is recommended. However, authenticity of the database server will not be verified.

+

require

+

Yes

+

SSL connection is required and data is encrypted. However, authenticity of the database server will not be verified.

+

verify-ca

+

Yes

+

SSL connection is required and whether the database has a trusted certificate will be verified.

+

verify-full

+

Yes

+

SSL connection is required. In addition to the check scope specified by verify-ca, the system checks whether the name of the host where the database resides is the same as that in the certificate. openGauss does not support this mode.

+
+ +5. Generate an SSL certificate. For details, see [Generating Certificates](generating-certificates.md). +6. Replace an SSL certificate. For details, see [Replacing Certificates](replacing-certificates.md). +7. Configure the database server. + 1. Log in as the OS user **omm** to the primary node of the database. + 2. Run the following command to add NIC IP addresses or host names, with values separated by commas \(,\). The NICs and hosts are used to provide external services. In the following command, _NodeName_ specifies the name of the current node. + + ``` + gs_guc reload -N NodeName -I all -c "listen_addresses='localhost,192.168.0.100,10.11.12.13'" + ``` + + If direct routing of LVS is used, add the virtual IP address \(10.11.12.13\) of LVS to the server listening list. + + You can also set **listen\_addresses** to **\*** or **0.0.0.0** to listen to all NICs, but this incurs security risks and is not recommended. + + 3. Run the following command to add an authentication rule to the configuration file of the primary database node. In this example, the IP address \(10.11.12.13\) of the client is the remote host IP address. + + ``` + gs_guc reload -N all -I all -h "host all jack 10.11.12.13/32 sha256" + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- **-N all** indicates all hosts in openGauss. + >- **-I all** indicates all instances of the host. + >- **-h** specifies statements that need to be added in the **pg\_hba.conf** file. + >- **all** indicates that a client can connect to any database. + >- **jack** indicates the user that accesses the database. + >- **_10.11.12.13/__32_** indicates hosts whose IP address is 10.11.12.13 can be connected. Configure the parameter based on your network conditions. **32** indicates that there are 32 bits whose value is 1 in the subnet mask. That is, the subnet mask is 255.255.255.255. + >- **sha256** indicates that the password of user **jack** is encrypted using the SHA-256 algorithm. + + If the ODBC client and the primary database node to connect are deployed on the same machine, you can use the local trust authentication mode. Run the following command: + + ``` + local all all xxx.xxx.xxx.xxx/32 trust + ``` + + If the ODBC client and the primary database node to connect are deployed on different machines, use the SHA-256 authentication mode. Run the following command: + + ``` + host all all xxx.xxx.xxx.xxx/32 sha256 + ``` + + 4. Restart openGauss. + + ``` + gs_om -t stop + gs_om -t start + ``` + +8. Configure environment variables. + + ``` + vim ~/.bashrc + ``` + + Add the following information to the configuration file: + + ``` + export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH + export ODBCSYSINI=/usr/local/etc + export ODBCINI=/usr/local/etc/odbc.ini + ``` + +9. Run the following command to validate the addition: + + ``` + source ~/.bashrc + ``` + + +## Verifying the Data Source Configuration + +Run the **isql-v** _GaussODBC_ command \(**_GaussODBC_** is the data source name\). + +- If the following information is displayed, the configuration is correct and the connection succeeds. + + ``` + +---------------------------------------+ + | Connected! | + | | + | sql-statement | + | help [tablename] | + | quit | + | | + +---------------------------------------+ + SQL> + ``` + +- If error information is displayed, the configuration is incorrect. Check the configuration. + +## FAQs + +- \[UnixODBC\]\[Driver Manager\]Can't open lib 'xxx/xxx/psqlodbcw.so' : file not found. 
+ + Possible causes: + + - The path configured in the **odbcinst.ini** file is incorrect. + + Run **ls** to check the path in the error information, and ensure that the **psqlodbcw.so** file exists and you have execute permissions on it. + + - The dependent library of **psqlodbcw.so** does not exist or is not in system environment variables. + + Run **ldd** to check the path in the error information. If **libodbc.so.1** or other UnixODBC libraries do not exist, configure UnixODBC again following the procedure provided in this section, and add the **lib** directory under its installation directory to **LD\_LIBRARY\_PATH**. If other libraries do not exist, add the **lib** directory under the ODBC driver package to **LD\_LIBRARY\_PATH**. + + +- \[UnixODBC\]connect to server failed: no such file or directory + + Possible causes: + + - An incorrect or unreachable database IP address or port number was configured. + + Check the **Servername** and **Port** configuration items in data sources. + + - Server monitoring is improper. + + If **Servername** and **Port** are correctly configured, ensure the proper network adapter and port are monitored by following the database server configurations in the procedure in this section. + + - Firewall and network gatekeeper settings are improper. + + Check firewall settings, and ensure that the database communication port is trusted. + + Check to ensure network gatekeeper settings are proper \(if any\). + + +- \[unixODBC\]The password-stored method is not supported. + + Possible causes: + + The **sslmode** configuration item is not configured in the data sources. + + Solution: + + Set the configuration item to **allow** or a higher level. For details, see [Table 3](#en-us_topic_0237120407_en-us_topic_0059778464_table22136585143846). + +- Server common name "xxxx" does not match host name "xxxxx" + + Possible causes: + + When **verify-full** is used for SSL encryption, the driver checks whether the host name in certificates is the same as the actual one. + + Solution: + + To solve this problem, use **verify-ca** to stop checking host names, or generate a set of CA certificates containing the actual host names. + +- Driver's SQLAllocHandle on SQL\_HANDLE\_DBC failed + + Possible causes: + + The executable file \(such as the **isql** tool of unixODBC\) and the database driver \(**psqlodbcw.so**\) depend on different library versions of ODBC, such as **libodbc.so.1** and **libodbc.so.2**. You can verify this problem by using the following method: + + ``` + ldd `which isql` | grep odbc + ldd psqlodbcw.so | grep odbc + ``` + + If the suffix digits of the outputs **libodbc.so** are different or indicate different physical disk files, this problem exists. Both **isql** and **psqlodbcw.so** load **libodbc.so**. If different physical files are loaded, different ODBC libraries with the same function list conflict with each other in a visible domain. As a result, the database driver cannot be loaded. + + Solution: + + Uninstall the unnecessary unixODBC, such as libodbc.so.2, and create a soft link with the same name and the .so.2 suffix for the remaining libodbc.so.1 library. + +- FATAL: Forbid remote connection with trust method! + + For security purposes, the primary database node forbids access from other nodes in openGauss without authentication. + + To access the primary database node from inside openGauss, deploy the ODBC program on the host where the primary database node is located and set the server address to **127.0.0.1**. 
It is recommended that the service system be deployed outside openGauss. If it is deployed inside, database performance may be affected. + +- \[unixODBC\]\[Driver Manager\]Invalid attribute value + + This problem occurs when you use SQL on other GaussDB. The possible cause is that the unixODBC version is not the recommended one. You are advised to run the **odbcinst --version** command to check the unixODBC version. + +- authentication method 10 not supported. + + If this error occurs on an open-source client, the cause may be: + + The database stores only the SHA-256 hash of the password, but the open-source client supports only MD5 hashes. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The database stores the hashes of user passwords instead of actual passwords. + >- If a password is updated or a user is created, both types of hashes will be stored, compatible with open-source authentication protocols. + >- An MD5 hash can only be generated using the original password, but the password cannot be obtained by reversing its SHA-256 hash. Passwords in the old version will only have SHA-256 hashes and not support MD5 authentication. + + To solve this problem, you can update the user password \(see [ALTER USER](alter-user.md)\) or create a user \(see [CREATE USER](create-user.md)\) having the same permissions as the faulty user. + +- unsupported frontend protocol 3.51: server supports 1.0 to 3.0 + + The database version is too early or the database is an open-source database. Use the driver of the required version to connect to the database. + +- FATAL: GSS authentication method is not allowed because XXXX user password is not disabled. + + In **pg\_hba.conf** of the target primary database node, the authentication mode is set to **gss** for authenticating the IP address of the current client. However, this authentication algorithm cannot authenticate clients. Change the authentication algorithm to **sha256** and try again. For details, see [7](#en-us_topic_0237120407_en-us_topic_0059778464_l4c0173b8af93447e91aba24005e368e5). + + diff --git a/content/en/docs/Developerguide/configuring-a-remote-connection.md b/content/en/docs/Developerguide/configuring-a-remote-connection.md new file mode 100644 index 000000000..f21147590 --- /dev/null +++ b/content/en/docs/Developerguide/configuring-a-remote-connection.md @@ -0,0 +1,41 @@ +# Configuring a Remote Connection + +Before the remote connection, you need to enable the client to access the database and configure the remote connection on the server where the database primary node is deployed. + +## Procedure + +Perform the following steps on the host where openGauss resides. + +1. Log in as the OS user **omm** to the primary node of the database. +2. Configure the client authentication mode. For details, see [Configuring Client Access Authentication](configuring-client-access-authentication.md). +3. Configure **[listen\_addresses](connection-settings.md#en-us_topic_0237124695_en-us_topic_0059777636_sed0adde99a3f47669f5d4ab557b36b35)**. **listen\_addresses** indicates the IP address or host name of the database primary node used for remote client connection. + 1. 
Run the following command to check the **listen\_addresses** value of the database primary node: + + ``` + gs_guc check -I all -c "listen_addresses" + ``` + + Information similar to the following is displayed: + + ``` + expected guc information: plat1: listen_addresses=NULL: [/gaussdb/data/data_cn/postgresql.conf] + gs_guc check: plat1: listen_addresses='localhost, 192.168.0.100': [/gaussdb/data/data_cn/postgresql.conf] + + Total GUC values: 1. Failed GUC values: 0. + The value of parameter listen_addresses is same on all instances. + listen_addresses='localhost, 192.168.0.100' + ``` + + 2. Run the following command to append the IP addresses to be added to **listen\_addresses**. Use commas \(,\) to separate multiple IP addresses. For example, add the IP address 10.11.12.13. + + ``` + gs_guc set -I all -c "listen_addresses='localhost,192.168.0.100,10.11.12.13'" + ``` + +4. Run the following command to restart the openGauss: + + ``` + gs_om -t stop && gs_om -t start + ``` + + diff --git a/content/en/docs/Developerguide/configuring-client-access-authentication.md b/content/en/docs/Developerguide/configuring-client-access-authentication.md new file mode 100644 index 000000000..ee80781f7 --- /dev/null +++ b/content/en/docs/Developerguide/configuring-client-access-authentication.md @@ -0,0 +1,115 @@ +# Configuring Client Access Authentication + +## Background + +If a host needs to connect to a database remotely, you need to add information about the host in configuration file of the database system and perform client access authentication. The configuration file \(**pg\_hba.conf** by default\) is stored in the data directory of the database. HBA is short for host-based authentication. + +- The system supports the following three authentication methods, which all require the **pg\_hba.conf** file. + - Host-based authentication: A server checks the configuration file based on the IP address, username, and target database of the client to determine whether the user can be authenticated. + - Password authentication: A password can be an encrypted password for remote connection or a non-encrypted password for local connection. + - SSL encryption: The OpenSSL is used to provide a secure connection between the server and the client. + +- In the **pg\_hba.conf** file, each record occupies one row and specifies an authentication rule. An empty row or a row started with a number sign \(\#\) is neglected. +- Each authentication rule consists of multiple columns separated by spaces and forward slashes \(/\), or spaces and tab characters. If a field is enclosed with quotation marks \("\), it can contain spaces. One record cannot span different rows. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Configure the client authentication mode and enable the client to connect to the host as user **jack**. User **omm** cannot be used for remote connection. + + Assume you are to allow the client whose IP address is **10.10.0.30** to access the current host. + + ``` + gs_guc set -N all -I all -h "host all jack 10.10.0.30/32 sha256" + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- Before using user **jack**, connect to the database locally and run the following command in the database to create user **jack**: + > ``` + > postgres=# CREATE USER jack PASSWORD 'Test@123'; + > ``` + >- **-N all** indicates all hosts in openGauss. + >- **-I all** indicates all instances on the host. 
+ >- **-h** specifies statements that need to be added in the **pg\_hba.conf** file. + >- **all** indicates that a client can connect to any database. + >- **jack** indicates the user that accesses the database. + >- _10.10.0.30_/_32_ indicates that only the client whose IP address is **10.10.0.30** can connect to the host. The specified IP address must be different from those used in openGauss. **32** indicates that there are 32 bits whose value is 1 in the subnet mask. That is, the subnet mask is 255.255.255.255. + >- **sha256** indicates that the password of user **jack** is encrypted using the SHA-256 algorithm. + + This command adds a rule to the **pg\_hba.conf** file corresponds to the primary node of the database. The rule is used to authenticate clients that access primary node. + + Each record in the **pg\_hba.conf** file can be in one of the following four formats. For parameter description of the four formats, see [Configuration File Reference](configuration-file-reference.md). + + ``` + local DATABASE USER METHOD [OPTIONS] + host DATABASE USER ADDRESS METHOD [OPTIONS] + hostssl DATABASE USER ADDRESS METHOD [OPTIONS] + hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] + ``` + + During authentication, the system checks records in the **pg\_hba.conf** file in sequence for connection requests, so the record sequence is vital. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >Configure records in the **pg\_hba.conf** file from top to bottom based on communication and format requirements in the descending order of priorities. The IP addresses of the openGauss cluster and added hosts are of the highest priority and should be configured prior to those manually configured by users. If the IP addresses manually configured by users and those of added hosts are in the same network segment, delete the manually configured IP addresses before the scale-out and configure them after the scale-out. + + The suggestions on configuring authentication rules are as follows: + + - Records placed at the front have strict connection parameters but weak authentication methods. + - Records placed at the end have weak connection parameters but strict authentication methods. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- If a user wants to connect to a specified database, the user must be authenticated by the rules in the **pg\_hba.conf** file and have the **CONNECT** permission for the database. If you want to restrict a user from connecting to certain databases, you can grant or revoke the user's **CONNECT** permission, which is easier than setting rules in the **pg\_hba.conf** file. + >- The **trust** authentication mode is insecure for a connection between the openGauss and a client outside the cluster. In this case, set the authentication mode to **sha256**. + + +## Exception Handling + +There are many reasons for a user authentication failure. You can view an error message returned from a server to a client to determine the exact cause. [Table 1](#en-us_topic_0237121090_en-us_topic_0059778856_t451d737a3917467b9691ba1306766cdb) lists common error messages and solutions to these errors. + +**Table 1** Error messages + + + + + + + + + + + + + + + + +

Symptom

+

Solution

+

The username or password is incorrect.

+
FATAL: invalid username/password,login denied
+

Retry the authentication with the correct username and password.

+

The database to connect does not exist.

+
FATAL: database "TESTDB" does not exist
+

Retry the authentication with the correct database name.

+

No matched client record is found.

+
FATAL: no pg_hba.conf entry for host "10.10.0.60", user "ANDYM", database "TESTDB"
+

This message indicates that the server was reached but denied the connection request because no matching record was found in pg_hba.conf. Contact the database administrator to add the user information to the pg_hba.conf file.

+
+ +## Example + +``` +TYPE DATABASE USER ADDRESS METHOD + +"local" is for Unix domain socket connections only +#Allow only the user specified by the -U parameter during installation to establish a connection from the local server. +local all all trust +IPv4 local connections: +#User omm is allowed to connect to any database from the 10.10.0.50 host. The SHA-256 algorithm is used to encrypt the password. +host all omm 10.10.0.50/32 sha256 +#Any user is allowed to connect to any database from a host on the 10.10.0.0/24 network segment. The SHA-256 algorithm is used to encrypt the password and SSL transmission is used. +hostssl all all 10.10.0.0/24 sha256 +#Any user is allowed to connect to any database from a host on the 10.10.0.0/24 network segment. The Kerberos authentication is used. In the current version, Kerberos authentication cannot be used to connect to external clients. +host all all 10.10.0.0/24 gss include_realm=1 krb_realm=HADOOP.COM +``` + diff --git a/content/en/docs/Developerguide/configuring-database-audit.md b/content/en/docs/Developerguide/configuring-database-audit.md new file mode 100644 index 000000000..e34c367f2 --- /dev/null +++ b/content/en/docs/Developerguide/configuring-database-audit.md @@ -0,0 +1,11 @@ +# Configuring Database Audit + +- **[Overview](overview-2.md)** + +- **[Querying Audit Results](querying-audit-results.md)** + +- **[Maintaining Audit Logs](maintaining-audit-logs.md)** + +- **[Configuring File Permission Security Policies](configuring-file-permission-security-policies.md)** + + diff --git a/content/en/docs/Developerguide/configuring-file-permission-security-policies.md b/content/en/docs/Developerguide/configuring-file-permission-security-policies.md new file mode 100644 index 000000000..4d4a909cd --- /dev/null +++ b/content/en/docs/Developerguide/configuring-file-permission-security-policies.md @@ -0,0 +1,176 @@ +# Configuring File Permission Security Policies + +## Background + +During its installation, the database sets permissions for its files, including files \(such as log files\) generated during the running process. File permissions are set as follows: + +- The permission of program directories in the database is set to **0750**. +- The permission for data file directories in the database is set to **0700**. + + During openGauss deployment, the directory specified by the **tmpMppdbPath** parameter in the XML configuration file is created for storing **.s.PGSQL.\*** files. If the parameter is not specified, the **/tmp/**_$USER_**\_mppdb** directory is created. The directory and file permission is set to **0700**. + +- The permissions of data files and audit logs of the database, as well as data files generated by other database programs, are set to **0600**. The permission of run logs is equal to or lower than **0640** by default. +- Common OS users are not allowed to modify or delete database files and log files. + +## Directory and File Permissions of Database Programs + +[Table 1](#en-us_topic_0237121115_en-us_topic_0059779254_t0da233846f2544f39362bcf53de94799) lists some of program directories and file permissions of the installed database. + +**Table 1** Program directories and file permissions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

File or Directory

+

Parent Directory

+

Permissions

+

bin

+

-

+

0700

+

lib

+

-

+

0700

+

share

+

-

+

0700

+

data (database node/primary database node)

+

-

+

0700

+

base

+

Instance data directory

+

0700

+

global

+

Instance data directory

+

0700

+

pg_audit

+

Instance data directory (configurable)

+

0700

+

pg_log

+

Instance data directory (configurable)

+

0700

+

pg_xlog

+

Instance data directory

+

0700

+

postgresql.conf

+

Instance data directory

+

0600

+

pg_hba.conf

+

Instance data directory

+

0600

+

postmaster.opts

+

Instance data directory

+

0600

+

pg_ident.conf

+

Instance data directory

+

0600

+

gs_initdb

+

bin

+

0700

+

gs_dump

+

bin

+

0700

+

gs_ctl

+

bin

+

0700

+

gs_guc

+

bin

+

0700

+

gsql

+

bin

+

0700

+

archive_status

+

pg_xlog

+

0700

+

libpq.so.5.5

+

lib

+

0600

+
+ +## Suggestion + +During the installation, the database automatically sets permissions for its files, including files \(such as log files\) generated during the running process. The specified permissions meet permission requirements in most scenarios. If you have any special requirements for the related permissions, you are advised to periodically check the permission settings to ensure that the permissions meet the product requirements. + diff --git a/content/en/docs/Developerguide/configuring-llvm.md b/content/en/docs/Developerguide/configuring-llvm.md new file mode 100644 index 000000000..69dedaa9f --- /dev/null +++ b/content/en/docs/Developerguide/configuring-llvm.md @@ -0,0 +1,13 @@ +# Configuring LLVM + +Low Level Virtual Machine \(LLVM\) dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function calls, and by making local data more accurate during actual queries. + +LLVM needs to consume extra time to pre-generate intermediate representation \(IR\) and compile it into codes. Therefore, if the data volume is small or if a query itself consumes less time, the performance deteriorates. + +- **[LLVM Application Scenarios and Restrictions](llvm-application-scenarios-and-restrictions.md)** + +- **[Other Factors Affecting LLVM Performance](other-factors-affecting-llvm-performance.md)** + +- **[Recommended Suggestions for LLVM](recommended-suggestions-for-llvm.md)** + + diff --git a/content/en/docs/Developerguide/configuring-running-parameters.md b/content/en/docs/Developerguide/configuring-running-parameters.md new file mode 100644 index 000000000..bd5738592 --- /dev/null +++ b/content/en/docs/Developerguide/configuring-running-parameters.md @@ -0,0 +1,7 @@ +# Configuring Running Parameters + +- **[Viewing Parameter Values](viewing-parameter-values.md)** + +- **[Resetting Parameters](resetting-parameters.md)** + + diff --git a/content/en/docs/Developerguide/confirming-connection-information.md b/content/en/docs/Developerguide/confirming-connection-information.md new file mode 100644 index 000000000..98b8ede76 --- /dev/null +++ b/content/en/docs/Developerguide/confirming-connection-information.md @@ -0,0 +1,48 @@ +# Confirming Connection Information + +You can use a client tool to connect to a database through the primary node of the database. Before the connection, obtain the IP address of the primary node of the database and the port number of the server where the primary node of the database is deployed. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Run the **gs\_om-t status--detail** command to query instances in the openGauss cluster. + + ``` + gs_om -t status --detail + ``` + + ``` + [ DBnode State ] + + node node_ip instance state + ----------------------------------------------------------------------------- + 1 plat1 192.168.0.11 5001 /srv/BigData/gaussdb/data1/dbnode Normal + 2 plat2 192.168.0.12 5002 /srv/BigData/gaussdb/data1/dbnode Normal + 3 plat3 192.168.0.13 5003 /srv/BigData/gaussdb/data1/dbnode Normal + ``` + + For example, the server IP addresses where the primary node of the database is deployed are 192.168.10.11, 192.168.10.12, and 192.168.0.13. The data path of the primary node of the database is **/srv/BigData/gaussdb/data1/dbnode**. + +3. Confirm the port number of the primary node of the database. 
+ + View the port number in the **postgresql.conf** file in the data path of the database primary node obtained in [2](#en-us_topic_0237120290_en-us_topic_0062129725_li736435692628). The command is as follows: + + ``` + cat /srv/BigData/gaussdb/data1/dbnode/postgresql.conf | grep port + ``` + + ``` + port = 8000 # (change requires restart) + #comm_sctp_port = 1024 # Assigned by installation (change requires restart) + #comm_control_port = 10001 # Assigned by installation (change requires restart) + # supported by the operating system: + # e.g. 'localhost=10.145.130.2 localport=12211 remotehost=10.145.130.3 remoteport=12212, localhost=10.145.133.2 localport=12213 remotehost=10.145.133.3 remoteport=12214' + # e.g. 'localhost=10.145.130.2 localport=12311 remotehost=10.145.130.4 remoteport=12312, localhost=10.145.133.2 localport=12313 remotehost=10.145.133.4 remoteport=12314' + # %r = remote host and port + alarm_report_interval = 10 + support_extended_features=true + ``` + + **8000** is the port number of the database primary node. + + diff --git a/content/en/docs/Developerguide/connecting-to-a-database-0.md b/content/en/docs/Developerguide/connecting-to-a-database-0.md new file mode 100644 index 000000000..bd70d965e --- /dev/null +++ b/content/en/docs/Developerguide/connecting-to-a-database-0.md @@ -0,0 +1,140 @@ +# Connecting to a Database + +After a database is connected, you can use JDBC to run SQL statements to operate data. + +## Function Prototype + +JDBC provides the following three database connection methods: + +- DriverManager.getConnection\(String url\); +- DriverManager.getConnection\(String url, Properties info\); +- DriverManager.getConnection\(String url, String user, String password\); + +## Parameter + +**Table 1** Database connection parameters + + + + + + + + + + + + + + + + + + + +

Parameter

+

Description

+

url

+

postgresql.jar database connection descriptor. The format is as follows:

+
  • jdbc:postgresql:database
  • jdbc:postgresql://host/database
  • jdbc:postgresql://host:port/database
  • jdbc:postgresql://host:port/database?param1=value1&param2=value2
  • jdbc:postgresql://host1:port1,host2:port2/database?param1=value1&param2=value2
+
NOTE:
  • database indicates the name of the database to connect.
  • host indicates the name or IP address of the database server.

    If a machine connected to openGauss is not in the same network segment as openGauss, the IP address specified by host should be the value of coo.cooListenIp2 (application access IP address) set in Manager.

    +

    For security purposes, the primary database node forbids access from other nodes in openGauss without authentication. To access the primary database node from inside openGauss, deploy the JDBC program on the host where the primary database node is located and set host to 127.0.0.1. Otherwise, the error message "FATAL: Forbid remote connection with trust method!" may be displayed.

    +

    It is recommended that the service system be deployed outside openGauss. If it is deployed inside, database performance may be affected.

    +

    By default, the local host is used to connect to the server.

    +
  • port indicates the port number of the database server.

    By default, the database on port 5431 of the local host is connected.

    +
  • param indicates a database connection attribute.

    The parameter can be configured in the URL. The URL starts with a question mark (?), uses an equal sign (=) to assign a value to the parameter, and uses an ampersand (&) to separate parameters. You can also use the attributes of the info object for configuration. For details, see the example below.

    +
  • value indicates the database connection attribute values.
+
+

info

+

Database connection attributes. Common attributes are described as follows:

+
  • PGDBNAME: string type. This parameter specifies the database name. (This parameter does not need to be set in the URL. The system automatically parses the URL to obtain its value.)
  • PGHOST: string type. This parameter specifies the host IP address. For details, see the example below.
  • PGPORT: integer type. This parameter specifies the host port number. For details, see the example below.
  • user: string type. This parameter specifies the database user who creates the connection.
  • password: string type. This parameter specifies the password of the database user.
  • loggerLevel: string type. The following log levels are supported: OFF, DEBUG, and TRACE. The value OFF indicates that the log function is disabled. DEBUG and TRACE logs record information of different levels.
  • loggerFile: string type. This parameter specifies the name of a log file. You can specify a directory for storing logs. If no directory is specified, logs are stored in the directory where the client program is running.
  • allowEncodingChanges: Boolean type. If this parameter is set to true, the character set type can be changed. This parameter is used together with characterEncoding=CHARSET to set the character set. The two parameters are separated by ampersands (&).
  • currentSchema: string type. This parameter specifies the schema to be set in search-path.
  • hostRecheckSeconds: integer type. After JDBC attempts to connect to a host, the host status is saved: connection success or connection failure. This status is trusted within the duration specified by hostRecheckSeconds. After the duration expires, the status becomes invalid. The default value is 10 seconds.
  • ssl: Boolean type. This parameter specifies a connection in SSL mode.

    When this parameter is set to true, the NonValidatingFactory channel and certificate mode are supported.

    +

    For the NonValidatingFactory channel, configure the username and password and set SSL to true.

    +

    In certification mode, configure the client certificate, key, and root certificate, and set SSL to true.

    +
  • sslmode: string type. This parameter specifies the SSL authentication mode. The value can be require, verify-ca, or verify-full.
    • require: The system attempts to set up an SSL connection. If there is a CA file, the system performs verification as if the parameter was set to verify-ca.
    • verify-ca: The system attempts to set up an SSL connection and checks whether the server certificate is issued by a trusted CA.
    • verify-full: The system attempts to set up an SSL connection, checks whether the server certificate is issued by a trusted CA, and checks whether the host name of the server is the same as that in the certificate.
    +
  • sslcert: string type. This parameter specifies the complete path of the certificate file. The type of the client and server certificates is End Entity.
  • sslkey: string type. This parameter specifies the complete path of the key file. You must run the following command to convert the client certificate to the DER format:
    openssl pkcs8 -topk8 -outform DER -in client.key -out client.key.pk8 -nocrypt
    +
  • sslrootcert: string type. This parameter specifies the name of the SSL root certificate. The root certificate type is CA.
  • sslpassword: string type. This parameter is provided for ConsoleCallbackHandler.
  • sslpasswordcallback: string type. This parameter specifies the class name of the SSL password provider. The default value is org.postgresql.ssl.jdbc4.LibPQFactory.ConsoleCallbackHandler.
  • sslfactory: string type. This parameter specifies the class name used by SSLSocketFactory to establish an SSL connection.
  • sslfactoryarg: string type. The value is an optional parameter of the constructor function of the sslfactory class and is not recommended.
  • sslhostnameverifier: string type. This parameter specifies the class name of the host name verifier. The interface must implement javax.net.ssl.HostnameVerifier. The default value is org.postgresql.ssl.PGjdbcHostnameVerifier.
  • loginTimeout: integer type. This parameter specifies the waiting time for establishing the database connection, in seconds.
  • connectTimeout: integer type. This parameter specifies the timeout duration for connecting to a server, in seconds. If the time taken to connect to a server exceeds the value specified, the connection is interrupted. If the value is 0, the timeout mechanism is disabled.
  • socketTimeout: integer type. This parameter specifies the timeout duration for a socket read operation, in seconds. If the time taken to read data from a server exceeds the value specified, the connection is closed. If the value is 0, the timeout mechanism is disabled.
  • cancelSignalTimeout: integer type. Cancel messages may cause a block. This parameter controls connectTimeout and socketTimeout in a cancel message, in seconds. The default value is 10 seconds.
  • tcpKeepAlive: Boolean type. This parameter is used to enable or disable TCP keepalive detection. The default value is false.
  • logUnclosedConnections: Boolean type. The client may leak a connection object because it does not call the connection object's close() method. These objects will be collected as garbage and finalized using the finalize() method. If the caller ignores this operation, this method closes the connection.
  • assumeMinServerVersion: string type. The client sends a request to set a floating point. This parameter specifies the version of the server to connect, for example, assumeMinServerVersion=9.0. This parameter can reduce the number of packets to send during connection setup.
  • ApplicationName: string type. This parameter specifies the name of the JDBC driver that is being connected. You can query the pg_stat_activity table on the primary database node to view information about the client that is being connected. The JDBC driver name is displayed in the application_name column. The default value is PostgreSQL JDBC Driver.
  • connectionExtraInfo: Boolean type. This parameter specifies whether the JDBC driver reports the driver deployment path and process owner to the database.

    The value can be true or false. The default value is false. If connectionExtraInfo is set to true, the JDBC driver reports the driver deployment path and process owner to the database and displays the information in the connection_info parameter. In this case, you can query the information from PG_STAT_ACTIVITY.

    +
  • autosave: string type. The value can be always, never, or conservative. The default value is never. This parameter specifies the action that the driver should perform upon a query failure. If autosave is set to always, the JDBC driver sets a savepoint before each query and rolls back to the savepoint if the query fails. If autosave is set to never, there is no savepoint. If autosave is set to conservative, a savepoint is set for each query. However, the system rolls back and retries only when there is an invalid statement.
  • protocolVersion: integer type. This parameter specifies the connection protocol version. Only version 3 is supported. Note: MD5 encryption is used when this parameter is set. You must use the following command to change the database encryption mode: gs_guc set -N all -I all -c "password_encryption_type=1". After openGauss is restarted, create a user that uses MD5 encryption to encrypt passwords. You must also change the client connection mode to md5 in the pg_hba.conf file. Log in as the new user (not recommended).
  • prepareThreshold: integer type. This parameter specifies the time when the parse statement is sent. The default value is 5. It takes a long time to parse an SQL statement for the first time, but a short time to parse SQL statements later because of cache. If a session runs an SQL statement multiple consecutive times and the number of execution times exceeds the value of prepareThreshold, JDBC does not send the parse command to the SQL statement.
  • preparedStatementCacheQueries: integer type. This parameter specifies the number of queries cached in each connection. The default value is 256. If more than 256 different queries are used in the prepareStatement() call, the least recently used query cache will be discarded. The value 0 indicates that the cache function is disabled.
  • preparedStatementCacheSizeMiB: integer type. This parameter specifies the maximum cache size of each connection, in MB. The default value is 5. If the size of the cached queries exceeds 5 MB, the least recently used query cache will be discarded. The value 0 indicates that the cache function is disabled.
  • databaseMetadataCacheFields: integer type. The default value is 65536. This parameter specifies the maximum cache size of each connection. The value 0 indicates that the cache function is disabled.
  • databaseMetadataCacheFieldsMiB: integer type. The default value is 5. This parameter specifies the maximum cache size of each connection, in MB. The value 0 indicates that the cache function is disabled.
  • stringtype: string type. The value can be false, unspecified, or varchar. The default value is varchar. This parameter specifies the type of the PreparedStatement parameter used by the setString() method. If stringtype is set to varchar, these parameters are sent to the server as varchar parameters. If stringtype is set to unspecified, these parameters are sent to the server as an untyped value, and the server attempts to infer their appropriate type.
  • batchMode: Boolean type. This parameter specifies whether to connect the database in batch mode.
  • fetchsize: integer type. This parameter specifies the default fetchsize for statements in the created connection.
  • reWriteBatchedInserts: Boolean type. During batch import, if this parameter is set to on, N INSERT statements can be combined into one: insert into TABLE_NAME values(values1, ..., valuesN), ..., (values1, ..., valuesN). To use this parameter, set batchMode to off.
  • unknownLength: integer type. The default value is Integer.MAX_VALUE. This parameter specifies the length assumed for data types of unknown length (such as the postgresql TEXT type) when they are reported by functions such as ResultSetMetaData.getColumnDisplaySize and ResultSetMetaData.getPrecision.
  • defaultRowFetchSize: integer type. This parameter specifies the number of rows fetched from the ResultSet at a time. Limiting the number of rows read in each database access request avoids unnecessary memory consumption and therefore out-of-memory exceptions. The default value is 0, indicating that all rows are fetched at once. Negative values are not allowed.
  • binaryTransfer: Boolean type. This parameter specifies whether data is sent and received in binary format. The default value is false.
  • binaryTransferEnable: string type. This parameter specifies the types for which binary transmission is enabled. Types are separated by commas (,), and each type can be specified by either its OID or its name, for example, binaryTransferEnable=Integer4_ARRAY,Integer8_ARRAY.

    For example, if the OID name is BLOB and the OID number is 88, you can configure the OID as follows:

    binaryTransferEnable=BLOB or binaryTransferEnable=88

  • binaryTransferDisEnable: string type. This parameter specifies the types for which binary transmission is disabled. Types are separated by commas (,), and each type can be specified by either its OID or its name. The value of this parameter overrides the value of binaryTransferEnable.
  • blobMode: string type. This parameter specifies the data type to which the setBinaryStream method assigns values. The value on indicates that values are assigned to blob data, and off indicates that values are assigned to bytea data. The default value is on.
  • socketFactory: string type. This parameter specifies the name of the class used to create a socket connection with the server. This class must implement the javax.net.SocketFactory interface and define a constructor that takes no arguments or a single String argument.
  • socketFactoryArg: string type. The value is passed as an optional argument to the constructor of the socketFactory class. Using this parameter is not recommended.
  • receiveBufferSize: integer type. This parameter is used to set SO_RCVBUF on the connection stream.
  • sendBufferSize: integer type. This parameter is used to set SO_SNDBUF on the connection stream.
  • preferQueryMode: string type. The value can be extended, extendedForPrepared, extendedCacheEverything, or simple. This parameter specifies the query mode. In simple mode, the query is executed through the simple protocol without parse or bind operations. In extended mode, parse and bind operations are performed for the query. The extendedForPrepared mode applies the extended protocol only to prepared statements. In extendedCacheEverything mode, every statement is cached. A sample URL that combines several of these parameters is shown below, after the user and password descriptions.

user

Database user.

password

Password of the database user.
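
The following URL is a minimal sketch of how several of the parameters described above can be appended to the connection descriptor as query parameters; the IP address, port, and database name are placeholders taken from the examples in this section rather than required values:

```
jdbc:postgresql://10.10.0.13:8000/postgres?autosave=always&prepareThreshold=5&defaultRowFetchSize=1000&batchMode=off&reWriteBatchedInserts=on
```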
+ +## Examples + +``` +// The following code encapsulates database connection operations into an interface. The database can then be connected using an authorized username and a password. +public static Connection getConnect(String username, String passwd) + { + // Driver class. + String driver = "org.postgresql.Driver"; + // Database connection descriptor. + String sourceURL = "jdbc:postgresql://10.10.0.13:8000/postgres"; + Connection conn = null; + + try + { + // Load the driver. + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + // Create a connection. + conn = DriverManager.getConnection(sourceURL, username, passwd); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; +// The following code uses the Properties object as a parameter to establish a connection. +public static Connection getConnectUseProp(String username, String passwd) + { + // Driver class. + String driver = "org.postgresql.Driver"; + // Database connection descriptor. + String sourceURL = "jdbc:postgresql://10.10.0.13:8000/postgres?"; + Connection conn = null; + Properties info = new Properties(); + + try + { + // Load the driver. + Class.forName(driver); + } + catch( Exception e ) + { + e.printStackTrace(); + return null; + } + + try + { + info.setProperty("user", username); + info.setProperty("password", passwd); + // Create a connection. + conn = DriverManager.getConnection(sourceURL, info); + System.out.println("Connection succeed!"); + } + catch(Exception e) + { + e.printStackTrace(); + return null; + } + + return conn; + }; +``` + diff --git a/content/en/docs/Developerguide/connecting-to-a-database.md b/content/en/docs/Developerguide/connecting-to-a-database.md new file mode 100644 index 000000000..0f28c5813 --- /dev/null +++ b/content/en/docs/Developerguide/connecting-to-a-database.md @@ -0,0 +1,16 @@ +# Connecting to a Database + +Client tools used for connecting to a database include **gsql** and APIs \(such as **ODBC** and **JDBC**\). + +- **gsql** is a client tool provided by openGauss. As described in [Using gsql to Connect to a Database](using-gsql-to-connect-to-a-database.md), **psql** is used to enter, edit, and run SQL statements in an interactive manner. +- As described in [APIs](apis.md), standard databases, such as **ODBC** and **JDBC**, can be used to develop openGauss-based applications. + +- **[Confirming Connection Information](confirming-connection-information.md)** + +- **[Configuring a Remote Connection](configuring-a-remote-connection.md)** + +- **[Using gsql to Connect to a Database](using-gsql-to-connect-to-a-database.md)** + +- **[APIs](apis.md)** + + diff --git a/content/en/docs/Developerguide/connecting-to-the-database-(using-ssl).md b/content/en/docs/Developerguide/connecting-to-the-database-(using-ssl).md new file mode 100644 index 000000000..464b4116c --- /dev/null +++ b/content/en/docs/Developerguide/connecting-to-the-database-(using-ssl).md @@ -0,0 +1,136 @@ +# Connecting to the Database \(Using SSL\) + +When establishing connections to the openGauss server using JDBC, you can enable SSL connections to encrypt client and server communications for security of sensitive data transmission on the Internet. This section describes how applications establish an SSL connection to openGauss using JDBC. To start the SSL mode, you must have the server certificate, client certificate, and private key files. 
For details on how to obtain these files, see related documents and commands of OpenSSL. + +## Configuring the Server + +The SSL mode requires a root certificate, a server certificate, and a private key. + +Perform the following operations \(assuming that the license files are saved in the data directory **/gaussdb/data/datanode** and the default file names are used\): + +1. Log in as the OS user **omm** to the primary node of the database. +2. Generate and import a certificate. + + Generate an SSL certificate. For details, see [Generating Certificates](generating-certificates.md). Copy the generated **server.crt**, **server.key**, and **cacert.pem** files to the data directory on the server. + + Run the following command to query the data directory of the database node. The instance column indicates the data directory. + + ``` + gs_om -t status --detail + ``` + + In the Unix OS, **server.crt** and **server.key** must deny the access from the external or any group. Run the following command to set this permission: + + ``` + chmod 0600 server.key + ``` + +3. Enable the SSL authentication mode. + + ``` + gs_guc set -D /gaussdb/data/datanode -c "ssl=on" + ``` + +4. Set client access authentication parameters. The IP address is the IP address of the host to be connected. + + ``` + gs_guc reload -D /gaussdb/data/datanode -h "hostssl all all 127.0.0.1/32 cert" + gs_guc reload -D /gaussdb/data/datanode -h "hostssl all all IP/32 cert" + ``` + + Clients on the **127.0.0.1/32** network segment can connect to openGauss servers in SSL mode. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >If **METHOD** is set to **cert** in the **pg\_hba.conf** file of the server, the client must use the username \(common name\) configured in the license file \(**client.crt**\) for the database connection. If **METHOD** is set to **md5** or **sha256**, there is no such a restriction. + +5. Configure the digital certificate parameters related to SSL authentication. + + The information following each command indicates operation success. + + ``` + gs_guc set -D /gaussdb/data/datanode -c "ssl_cert_file='server.crt'" + gs_guc set: ssl_cert_file='server.crt' + ``` + + ``` + gs_guc set -D /gaussdb/data/datanode -c "ssl_key_file='server.key'" + gs_guc set: ssl_key_file='server.key' + ``` + + ``` + gs_guc set -D /gaussdb/data/datanode -c "ssl_ca_file='cacert.pem'" + gs_guc set: ssl_ca_file='cacert.pem' + ``` + +6. Restart the database. + + ``` + gs_om -t stop && gs_om -t start + ``` + +7. Generate and upload a certificate file. + +## Configuring the Client + +To configure the client, perform the following steps: + +Upload the certificate files **client.key.pk8**, **client.crt**, and **cacert.pem** generated in [Configuring the Server](#en-us_topic_0237120382_en-us_topic_0213179127_en-us_topic_0189251215_en-us_topic_0059777633_s513e457bfaa24ce4b1a20a1f2322f9ae) to the client. + +## Example + +``` +import java.sql.Connection; +import java.util.Properties; +import java.sql.DriverManager; +import java.sql.Statement; +import java.sql.ResultSet; + +public class SSL{ + public static void main(String[] args) { + Properties urlProps = new Properties(); + String urls = "jdbc:postgresql://10.29.37.136:8000/postgres"; + + /** +* ================== Example 1: The NonValidatingFactory channel is used, and MTETHOD in the pg_hba.conf file is not cert. 
+ */ +/* + urlProps.setProperty("sslfactory","org.postgresql.ssl.NonValidatingFactory"); + urlProps.setProperty("user", "world"); +//test@123 is the password specified when user CREATE USER world WITH PASSWORD 'test123@' is created. + urlProps.setProperty("password", "test@123"); + urlProps.setProperty("ssl", "true"); +*/ + /** +* ================== Example 2 - 5: Use a certificate. In the pg_hba.conf file, MTETHOD is cert. + */ + urlProps.setProperty("sslcert", "client.crt"); +// Client key in DER format + urlProps.setProperty("sslkey", "client.key.pk8"); + urlProps.setProperty("sslrootcert", "cacert.pem"); + urlProps.setProperty("user", "world"); + /* ================== Example 2: Set ssl to true to use the certificate for authentication.*/ + urlProps.setProperty("ssl", "true"); + /* ================== Example 3: Set sslmode to require to use the certificate for authentication. */ +// urlProps.setProperty("sslmode", "require"); + /* ================== Example 4: Set sslmode to verify-ca to use the certificate for authentication. */ +// urlProps.setProperty("sslmode", "verify-ca"); + /* ================== Example 5: Set sslmode to verify-full to use the certificate (in the Linux OS) for authentication. */ +// urls = "jdbc:postgresql://world:8000/postgres"; +// urlProps.setProperty("sslmode", "verify-full"); + + try { + Class.forName("org.postgresql.Driver").newInstance(); + } catch (Exception e) { + e.printStackTrace(); + } + try { + Connection conn; + conn = DriverManager.getConnection(urls,urlProps); + conn.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} +``` + diff --git a/content/en/docs/Developerguide/connection-and-authentication.md b/content/en/docs/Developerguide/connection-and-authentication.md new file mode 100644 index 000000000..b1c7a3461 --- /dev/null +++ b/content/en/docs/Developerguide/connection-and-authentication.md @@ -0,0 +1,9 @@ +# Connection and Authentication + +- **[Connection Settings](connection-settings.md)** + +- **[Security and Authentication \(postgresql.conf\)](security-and-authentication-(postgresql-conf).md)** + +- **[Communication Library Parameters](communication-library-parameters.md)** + + diff --git a/content/en/docs/Developerguide/connection-characters.md b/content/en/docs/Developerguide/connection-characters.md new file mode 100644 index 000000000..37ae9ef47 --- /dev/null +++ b/content/en/docs/Developerguide/connection-characters.md @@ -0,0 +1,88 @@ +# Connection Characters + +**Table 1** Connection strings + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Character String | Description |
| ---------------- | ----------- |
| host | Name of the host to connect to. If the host name starts with a slash (/), Unix-domain socket communication is used instead of TCP/IP, and the value is the directory where the socket file is stored. If host is not specified, the default behavior is to connect to the Unix-domain socket in the /tmp directory (or the socket directory specified during GaussDB installation). On a machine without Unix-domain sockets, the default behavior is to connect to localhost. |
| hostaddr | IP address of the host to connect to. The value is in standard IPv4 address format, for example, 172.28.40.9. If your machine supports IPv6, an IPv6 address can also be used. If a non-null string is specified, TCP/IP communication is used. Replacing host with hostaddr prevents the application from looking up host names, which may be important for applications with time constraints. However, a host name is required for the GSSAPI and SSPI authentication methods, so the rules listed after this table are applied. |
| port | Port number of the host server, or the socket file name extension for Unix-domain connections. |
| user | Name of the user to connect as. By default, the username is the same as the operating system name of the user running the application. |
| dbname | Database name. The default value is the same as the username. |
| password | Password to be used if the server requires password authentication. |
| connect_timeout | Maximum wait time for the connection, in seconds (written as a decimal integer string). The value 0 or an empty value indicates an infinite wait. You are not advised to set the connection timeout to less than 2 seconds. |
| client_encoding | Client encoding for the connection. In addition to the values accepted by the corresponding server option, you can use auto to determine the correct encoding from the current client environment (the LC_CTYPE environment variable on Unix systems). |
| options | Command-line options to send to the server at runtime. For example, adding -c comm_debug_mode=off sets the GUC parameter comm_debug_mode to off. |
| application_name | Name of the application for the current connection; it is reported to the server in the application_name parameter. |
| keepalives | Whether TCP keepalive is enabled on the client side. The default value is 1, indicating that keepalive is enabled; the value 0 disables it. This parameter is ignored for Unix-domain connections. |
| keepalives_idle | Number of seconds of inactivity after which TCP sends a keepalive message to the server. The value 0 indicates that the system default is used. This parameter is ignored for Unix-domain connections or if keepalive is disabled. |
| keepalives_interval | Number of seconds after which a TCP keepalive message that has not been acknowledged by the server is retransmitted. The value 0 indicates that the system default is used. This parameter is ignored for Unix-domain connections or if keepalive is disabled. |
| keepalives_count | Number of TCP keepalive messages that can be lost before the client considers the connection to the server dead. The value 0 indicates that the system default is used. This parameter is ignored for Unix-domain connections or if keepalive is disabled. |

Rules for using host and hostaddr together:

1. If host is specified but hostaddr is not, a host name lookup is performed.
2. If hostaddr is specified but host is not, the value of hostaddr is used as the server network address. If the authentication method requires a host name, the connection attempt fails.
3. If both host and hostaddr are specified, the value of hostaddr is used as the server network address. The value of host is ignored unless it is required by the authentication method, in which case it is used as the host name.

NOTICE:

- If host is not the name of the server at the network address given by hostaddr, authentication may fail.
- If neither host nor hostaddr is specified, libpq connects through a local Unix-domain socket; if the machine has no Unix-domain socket, it attempts to connect to localhost.
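
For illustration only (the address, port, database, and user below are placeholders rather than values required by openGauss), a keyword/value connection string that combines several of the parameters above might look as follows:

```
host=192.168.0.10 port=26000 dbname=postgres user=jack connect_timeout=10 application_name=myapp keepalives=1 keepalives_idle=30 keepalives_interval=10 keepalives_count=3
```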
+ diff --git a/content/en/docs/Developerguide/connection-pool-parameters.md b/content/en/docs/Developerguide/connection-pool-parameters.md new file mode 100644 index 000000000..43930af86 --- /dev/null +++ b/content/en/docs/Developerguide/connection-pool-parameters.md @@ -0,0 +1,37 @@ +# Connection Pool Parameters + +When a connection pool is used to access the database, database connections are established and then stored in the memory as objects during system running. When you need to access the database, no new connection is established. Instead, an existing idle connection is selected from the connection pool. After you finish accessing the database, the database does not disable the connection but puts it back into the connection pool. The connection can be used for the next access request. + +## pooler\_maximum\_idle\_time + +**Parameter description**: Specifies the maximum amount of time that the connections can remain idle in a pool before being removed. After that, the automatic connection clearing mechanism is triggered to reduce the number of connections on each node to the value of **minimum\_pool\_size**. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 0 to _INT\_MAX_. The smallest unit is m. + +**Default value**: **1h** \(60 minutes\) + +## minimum\_pool\_size + +**Parameter description**: Specifies the minimum number of remaining connections in the pool on each node after the automatic connection clearing is triggered. If this parameter is set to **0**, the automatic connection clearing is disabled. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: an integer ranging from 1 to 65535 + +**Default value**: **200** + +## cache\_connection + +**Parameter description**: Specifies whether to reclaim the connections of a connection pool. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 2](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t290c8f15953843db8d8e53d867cd893d). + +**Value range**: Boolean + +- **on** indicates that the connections of a connection pool will be reclaimed. +- **off** indicates that the connections of a connection pool will not be reclaimed. + +**Default value**: **on** + diff --git a/content/en/docs/Developerguide/connection-settings.md b/content/en/docs/Developerguide/connection-settings.md new file mode 100644 index 000000000..7fe3106a6 --- /dev/null +++ b/content/en/docs/Developerguide/connection-settings.md @@ -0,0 +1,143 @@ +# Connection Settings + +This section describes parameters related to client-server connection modes. + +## listen\_addresses + +**Parameter description**: Specifies the TCP/IP addresses that a server listens to for connections from the client. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: + +- Host name or IP address. Multiple values are separated with commas \(,\). +- Asterisk \(\*\) or **0.0.0.0**, indicating that all IP addresses will be listened to, which is not recommended due to potential security risks. 
This parameter must be used together with valid addresses \(for example, the local IP address\). Otherwise, the build may fail. +- If the parameter is not specified, the server does not listen to any IP address. In this case, only Unix domain sockets can be used for database connections. + +**Default value**: **localhost** + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>**localhost** indicates that only local loopback is allowed. + +## local\_bind\_address + +**Parameter description**: Specifies the host IP address bound to the current node for connecting to other nodes in openGauss. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Default value**: **0.0.0.0** \(The actual value is specified in the configuration file during installation.\) + +## port + +**Parameter description**: Specifies the TCP port listened to by openGauss. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1 to 65535 + +**Default value**: **5432** \(The actual value is specified in the configuration file during installation.\) + +## max\_connections + +**Parameter description**: Specifies the maximum number of concurrent connections to the database. This parameter influences the concurrent processing capability of openGauss. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1 to 262143 + +**Default value**: **5000** for each database node If the default value is greater than the maximum number supported by the kernel \(determined when the **gs\_initdb** command is executed\), an error message is displayed. + +**Setting suggestions**: + +Retain the default value of this parameter on the primary node of the database. + +If this parameter is set to a large value, openGauss requires more System V shared memories or semaphores, which may exceed the default maximum configuration of the OS. In this case, modify the value as needed. + +## sysadmin\_reserved\_connections + +**Parameter description**: Specifies the minimum number of connections reserved for administrators. You are advised not to set this parameter to a large value. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to _MIN_ \(which takes the smaller value between **262143** and **max\_connections**\) + +**Default value**: **3** + +## unix\_socket\_directory + +**Parameter description**: Specifies the Unix domain socket directory that the openGauss server listens to for connections from the client. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +The parameter length limit varies by OS. If the length is exceeded, the error "Unix-domain socket path xxx is too long" will be reported. + +**Value range**: a string + +**Default value**: empty. 
The actual value is specified by the configuration file during installation. + +## unix\_socket\_group + +**Parameter description**: Specifies the group of the Unix domain socket \(the user of a socket is the user that starts the server\). This parameter can work with **[unix\_socket\_permissions](#en-us_topic_0237124695_en-us_topic_0059777636_s09d0cf55124b4f1aa3d401d18b9b4151)** to control socket access. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string. If this parameter is set to an empty string, the default group of the current user is used. + +**Default value**: empty + +## unix\_socket\_permissions + +**Parameter description**: Specifies access permissions for the Unix domain socket. + +The Unix domain socket uses the usual permission set of the Unix file system. The value of this parameter should be a number \(acceptable for the **chmod** and **umask** commands\). If a user-defined octal format is used, the number must start with 0. + +You are advised to set it to **0770** \(only allowing access from users connecting to the database and users in the same group as them\) or **0700** \(only allowing access from users connecting to the database\). + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: 0000 to 0777 + +**Default value**: **0700** + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>In the Linux OS, a document has one document attribute and nine permission attributes, which consists of the read \(r\), write \(w\), and execute \(x\) permissions of the Owner, Group, and Others groups. +>The r, w, and x permissions are represented by the following numbers: +>r: 4 +>w: 2 +>x: 1 +>-: 0 +>The three attributes in a group are accumulative. +>For example, **-rwxrwx---** indicates the following permissions: +>owner = rwx = 4+2+1 = 7 +>group = rwx = 4+2+1 = 7 +>others = --- = 0+0+0 = 0 +>The permission of the file is 0770. + +## application\_name + +**Parameter description**: Specifies the client name used in the current connection request. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +**Default value**: empty \(The actual value is the name of the application connected to the backend.\) + +## connection\_info + +**Parameter description**: Specifies the database connection information, including the driver type, driver version, driver deployment path, and process owner. + +This parameter is a USERSET parameter used for O&M. You are advised not to change the parameter value. + +**Value range**: a string + +**Default value**: empty + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- An empty string indicates that the driver connected to the database does not support automatic setting of the **connection\_info** parameter or the parameter is not set by users in applications. 
+>- The following is an example of the concatenated value of **connection\_info**: +> ``` +> {"driver_name":"ODBC","driver_version": "(GaussDB Kernel V500R001C00 build 290d125f) compiled at 2020-05-08 02:59:43 commit 2143 last mr 131 debug","driver_path":"/usr/local/lib/psqlodbcw.so","os_user":"omm"} +> ``` +> **driver\_name** and **driver\_version** are displayed by default. Whether **driver\_path** and **os\_user** are displayed is determined by users. For details, see [Connecting to a Database](connecting-to-a-database-0.md) and [Configuring a Data Source in the Linux OS](configuring-a-data-source-in-the-linux-os.md). + diff --git a/content/en/docs/Developerguide/constant-and-macro.md b/content/en/docs/Developerguide/constant-and-macro.md new file mode 100644 index 000000000..1975af635 --- /dev/null +++ b/content/en/docs/Developerguide/constant-and-macro.md @@ -0,0 +1,113 @@ +# Constant and Macro + +[Table 1](#en-us_topic_0237121963_en-us_topic_0059778360_en-us_topic_0058965862_table49126904) lists the constants and macros that can be used in openGauss. + +**Table 1** Constant and macro + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Parameter | Description | Example |
| --------- | ----------- | ------- |
| CURRENT_CATALOG | Specifies the current database. | postgres=# SELECT CURRENT_CATALOG;<br>current_database<br>------------------<br>postgres<br>(1 row) |
| CURRENT_ROLE | Specifies the current user. | postgres=# SELECT CURRENT_ROLE;<br>current_user<br>--------------<br>omm<br>(1 row) |
| CURRENT_SCHEMA | Specifies the current schema. | postgres=# SELECT CURRENT_SCHEMA;<br>current_schema<br>----------------<br>public<br>(1 row) |
| CURRENT_USER | Specifies the current user. | postgres=# SELECT CURRENT_USER;<br>current_user<br>--------------<br>omm<br>(1 row) |
| LOCALTIMESTAMP | Specifies the current session time (without time zone). | postgres=# SELECT LOCALTIMESTAMP;<br>timestamp<br>----------------------------<br>2015-10-10 15:37:30.968538<br>(1 row) |
| NULL | Indicates a null value. | N/A |
| SESSION_USER | Specifies the current system user. | postgres=# SELECT SESSION_USER;<br>session_user<br>--------------<br>omm<br>(1 row) |
| SYSDATE | Specifies the current system date. | postgres=# SELECT SYSDATE;<br>sysdate<br>---------------------<br>2015-10-10 15:48:53<br>(1 row) |
| USER | Specifies the current user, also called CURRENT_USER. | postgres=# SELECT USER;<br>current_user<br>--------------<br>omm<br>(1 row) |
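
As a brief illustration (the audit_log table below is hypothetical and created only for this example), these constants and macros can be used as ordinary values in SQL statements:

```
postgres=# CREATE TABLE audit_log (operator text, op_time timestamp, db text);
postgres=# INSERT INTO audit_log VALUES (CURRENT_USER, SYSDATE, CURRENT_CATALOG);
postgres=# SELECT * FROM audit_log;
```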
+ diff --git a/content/en/docs/Developerguide/constraints-on-index-use.md b/content/en/docs/Developerguide/constraints-on-index-use.md new file mode 100644 index 000000000..32cc0932d --- /dev/null +++ b/content/en/docs/Developerguide/constraints-on-index-use.md @@ -0,0 +1,37 @@ +# Constraints on Index Use + +The following is an example of index use: + +``` +postgres=# create table table1 (c_int int,c_bigint bigint,c_varchar varchar,c_text text) with(orientation=row); + +postgres=# create text search configuration ts_conf_1(parser=POUND); +postgres=# create text search configuration ts_conf_2(parser=POUND) with(split_flag='%'); + +postgres=# set default_text_search_config='ts_conf_1'; +postgres=# create index idx1 on table1 using gin(to_tsvector(c_text)); + +postgres=# set default_text_search_config='ts_conf_2'; +postgres=# create index idx2 on tscp_u_m_005_tbl using gin(to_tsvector(c_text)); + +postgres=# select c_varchar,to_tsvector(c_varchar) from table1 where to_tsvector(c_text) @@ plainto_tsquery('¥#@...&**') and to_tsvector(c_text) @@ postgres=# plainto_tsquery('Company ') and c_varchar is not null order by 1 desc limit 3; +``` + +In this example, **table1** has two GIN indexes created on the same column **c\_text**, **idx1** and **idx2**, but these two indexes are created under different settings of [default\_text\_search\_config](zone-and-formatting.md#en-us_topic_0237124733_en-us_topic_0059778109_sd9a07d429cd4498383931c621742b816). Differences between this example and the scenario where one table has common indexes created on the same column are as follows: + +- GIN indexes use different parsers \(that is, different delimiters\). In this case, the index data of **idx1** is different from that of **idx2**. +- In the specified scenario, the index data of multiple common indexes created on the same column is the same. + +As a result, using **idx1** and **idx2** for the same query returns different results. + +## Constraints + +Still use the above example. When: + +- Multiple GIN indexes are created on the same column of the same table. +- The GIN indexes use different parsers \(that is, different delimiters\). +- The column is used in a query, and an index scan is used in the execution plan. + + To avoid different query results caused by different GIN indexes, ensure that only one GIN index is available on a column of the physical table. 
+ + diff --git a/content/en/docs/Developerguide/control-statements.md b/content/en/docs/Developerguide/control-statements.md new file mode 100644 index 000000000..e22f2e0ac --- /dev/null +++ b/content/en/docs/Developerguide/control-statements.md @@ -0,0 +1,17 @@ +# Control Statements + +- **[RETURN Statements](return-statements.md)** + +- **[Conditional Statements](conditional-statements.md)** + +- **[Loop Statements](loop-statements.md)** + +- **[Branch Statements](branch-statements.md)** + +- **[NULL Statements](null-statements.md)** + +- **[Error Trapping Statements](error-trapping-statements.md)** + +- **[GOTO Statements](goto-statements.md)** + + diff --git a/content/en/docs/Developerguide/controlling-text-search.md b/content/en/docs/Developerguide/controlling-text-search.md new file mode 100644 index 000000000..6c1a7aaea --- /dev/null +++ b/content/en/docs/Developerguide/controlling-text-search.md @@ -0,0 +1,11 @@ +# Controlling Text Search + +- **[Parsing Documents](parsing-documents.md)** + +- **[Parsing Queries](parsing-queries.md)** + +- **[Ranking Search Results](ranking-search-results.md)** + +- **[Highlighting Results](highlighting-results.md)** + + diff --git a/content/en/docs/Developerguide/controlling-transactions.md b/content/en/docs/Developerguide/controlling-transactions.md new file mode 100644 index 000000000..44b97d362 --- /dev/null +++ b/content/en/docs/Developerguide/controlling-transactions.md @@ -0,0 +1,23 @@ +# Controlling Transactions + +A transaction is a user-defined sequence of database operations, which form an integral unit of work. + +## Starting a Transaction + +openGauss starts a transaction using **START TRANSACTION** and **BEGIN**. For details, see [START TRANSACTION](start-transaction.md) and [BEGIN](begin.md). + +## Setting a Transaction + +openGauss sets a transaction using **SET TRANSACTION** or **SET LOCAL TRANSACTION**. For details, see [SET TRANSACTION](set-transaction.md). + +## Committing a Transaction + +openGauss commits all operations of a transaction using **COMMIT** or **END**. For details, see [COMMIT | END](commit-end.md). + +## Rolling Back a Transaction + +If a fault occurs during a transaction and the transaction cannot proceed, the system performs rollback to cancel all the completed database operations related to the transaction. See [ROLLBACK](rollback.md). + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If an execution request \(not in a transaction block\) received in the database contains multiple statements, the request is packed into a transaction. If one of the statements fails, the entire request will be rolled back. + diff --git a/content/en/docs/Developerguide/conversion-example.md b/content/en/docs/Developerguide/conversion-example.md new file mode 100644 index 000000000..7ca0193e6 --- /dev/null +++ b/content/en/docs/Developerguide/conversion-example.md @@ -0,0 +1,68 @@ +# Conversion Example + +Let’s say that you have a database name **benchmarksql** and a table named **customer** \(which is a disk-based table\) to be migrated it into a MOT table. + +To migrate the customer table into a MOT table, perform the following – + +- Check your source table column types. Verify that all types are supported by MOT, refer to section Unsupported Data Types. 
+ + ``` + benchmarksql-# \d+ customer + Table "public.customer" + Column | Type | Modifiers | Storage | Stats target | Description + --------+---------+-----------+---------+--------------+------------- + x | integer | | plain | | + y | integer | | plain | | + Has OIDs: no + Options: orientation=row, compression=no + ``` + +- Check your source table data. + + ``` + benchmarksql=# select * from customer; + x | y + ---+--- + 1 | 2 + 3 | 4 + (2 rows) + ``` + +- Dump table data only by using **gs\_dump**. + + ``` + $ gs_dump -Fc benchmarksql -a --table customer -f customer.dump + gs_dump[port='15500'][benchmarksql][2020-06-04 16:45:38]: dump database benchmarksql successfully + gs_dump[port='15500'][benchmarksql][2020-06-04 16:45:38]: total time: 332 ms + Rename the source table name. + benchmarksql=# alter table customer rename to customer_bk; + ALTER TABLE + Create the MOT table to be exactly the same as the source table. + benchmarksql=# create foreign table customer (x int, y int); + CREATE FOREIGN TABLE + benchmarksql=# select * from customer; + x | y + ---+--- + (0 rows) + Import the source dump data into the new MOT table. + $ gs_restore -C -d benchmarksql customer.dump + restore operation successful + total time: 24 ms + Check that the data was imported successfully. + benchmarksql=# select * from customer; + x | y + ---+--- + 1 | 2 + 3 | 4 + (2 rows) + + benchmarksql=# \d + List of relations + Schema | Name | Type | Owner | Storage + --------+-------------+---------------+--------+---------------------------------- + public | customer | foreign table | aharon | + public | customer_bk | table | aharon | {orientation=row,compression=no} + (2 rows) + ``` + + diff --git a/content/en/docs/Developerguide/converting-a-disk-table-into-a-mot-table.md b/content/en/docs/Developerguide/converting-a-disk-table-into-a-mot-table.md new file mode 100644 index 000000000..cbe2cea04 --- /dev/null +++ b/content/en/docs/Developerguide/converting-a-disk-table-into-a-mot-table.md @@ -0,0 +1,13 @@ +# Converting a Disk Table into a MOT Table + +The direct conversion of disk tables into MOT tables is not yet possible, meaning that no ALTER TABLE statement yet exists that converts a disk-based table into a MOT table. + +The following describes how to manually perform a few steps in order to convert a disk-based table into a MOT table, as well as how the **gs\_dump** tool is used to export data and the **gs\_restore **tool is used to import data. + +- **[Prerequisite Check](prerequisite-check.md)** + +- **[Converting](converting.md)** + +- **[Conversion Example](conversion-example.md)** + + diff --git a/content/en/docs/Developerguide/converting.md b/content/en/docs/Developerguide/converting.md new file mode 100644 index 000000000..a544711fa --- /dev/null +++ b/content/en/docs/Developerguide/converting.md @@ -0,0 +1,14 @@ +# Converting + +To covert a disk-based table into a MOT table, perform the following: + +1. Suspend application activity. +2. Use **gs\_dump** tool to dump the table’s data into a physical file on disk. Make sure to use the **data only**. +3. Rename your original disk-based table. +4. Create a MOT table with the same table name and schema. Make sure to use the create FOREIGN keyword to specify that it will be a MOT table. +5. Use** gs\_restore** to load/restore data from the disk file into the database table. +6. Visually/manually verify that all the original data was imported correctly into the new MOT table. An example is provided below. +7. Resume application activity. 
+ +**IMPORTANT Note** – In this way, since the table name remains the same, application queries and relevant database stored-procedures will be able to access the new MOT table seamlessly without code changes. Notice, that due to the current limitation of unsporting cross-engine queries and transactions, in the case queries and stored procedures do joins, unions and similar multi-table queries - it is required to complete the conversion steps to all relevant tables. UNCLEAR – PLEASE FIX VARIOUS THINGS IN THIS SENTENCE GGG + diff --git a/content/en/docs/Developerguide/copy.md b/content/en/docs/Developerguide/copy.md new file mode 100644 index 000000000..2da5b6c3b --- /dev/null +++ b/content/en/docs/Developerguide/copy.md @@ -0,0 +1,615 @@ +# COPY + +## Function + +**COPY** copies data between tables and files. + +**COPY FROM** copies data from a file to a table, and **COPY TO** copies data from a table to a file. + +## Precautions + +- To run the **COPY FROM FILENAME** or **COPY TO FILENAME** statement, you must have the **SYSADMIN** permission. By default, user **SYSADMIN** is not allowed to run the **COPY FROM FILENAME** or **COPY TO FILENAME** statement on database configuration files, key files, certificate files, and audit logs, preventing user **SYSADMIN** from viewing or modifying sensitive files without authorization. To grant the permission, you need to change the setting of **enable\_copy\_server\_files**. +- **COPY** applies only to tables but not views. +- To insert data to a table, you must have the permission to insert data. +- If a list of columns is specified, **COPY** copies only the data of the specified columns between the file and the table. If a table has any columns that are not in the column list, **COPY FROM** inserts default values for those columns. +- If a data source file is specified, the server must be able to access the file. If **STDIN** is specified, data flows between the client and the server. When entering data, use the **TAB** key to separate the columns of the table and use a backslash and a period \(\\.\) in a new row to indicate the end of the input. +- **COPY FROM** throws an error if any row in the data file contains more or fewer columns than expected. +- The end of the data can be represented by a line that contains only a backslash and a period \(\\.\). If data is read from a file, the end flag is unnecessary. If data is copied between client applications, an end tag must be provided. +- In **COPY FROM**, **\\N** is an empty string. To enter the actual value **\\N**, use **\\\\N**. + +- **COPY FROM** does not support data preprocessing during data import, such as expression operation and default value filling. If you need to preprocess data during the import, you need to import the data to a temporary table and then run SQL statements to insert the data to the table through operations. However, this method causes I/O expansion and reduces the import performance. +- When a data format error occurs during **COPY FROM** execution, the transaction is rolled back. However, the error information is insufficient, making it difficult to locate the error data from a large amount of raw data. +- **COPY FROM** and **COPY TO** apply to low concurrency and local import and export of a small amount of data. + +## Syntax + +- Copy data from a file to a table. + + ``` + COPY table_name [ ( column_name [, ...] 
) ] + FROM { 'filename' | STDIN } + [ [ USING ] DELIMITERS 'delimiters' ] + [ WITHOUT ESCAPING ] + [ LOG ERRORS ] + [ REJECT LIMIT 'limit' ] + [ WITH ( option [, ...] ) ] + | copy_option + | FIXED FORMATTER ( { column_name( offset, length ) } [, ...] ) [ ( option [, ...] ) | copy_option [ ...] ] ]; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >In the syntax, **FIXED FORMATTER \(\{column\_name\(offset, length\)\} \[, ...\]\)** and **\[\(option \[, ...\]\) | copy\_option \[...\]\]** can be in any sequence. + +- Copy data from a table to a file. + + ``` + COPY table_name [ ( column_name [, ...] ) ] + TO { 'filename' | STDOUT } + [ [ USING ] DELIMITERS 'delimiters' ] + [ WITHOUT ESCAPING ] + [ WITH ( option [, ...] ) ] + | copy_option + | FIXED FORMATTER ( { column_name( offset, length ) } [, ...] ) [ ( option [, ...] ) | copy_option [ ...] ] ]; + + COPY query + TO { 'filename' | STDOUT } + [ WITHOUT ESCAPING ] + [ WITH ( option [, ...] ) ] + | copy_option + | FIXED FORMATTER ( { column_name( offset, length ) } [, ...] ) [ ( option [, ...] ) | copy_option [ ...] ] ]; + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >1. The syntax constraints of **COPY TO** are as follows: + > **\(query\)** is incompatible with **\[USING\] DELIMITER**. If the data comes from a query result, **COPY TO** cannot specify **\[USING\] DELIMITERS**. + >2. Use spaces to separate **copy\_option** following **FIXED FORMATTTER**. + >3. **copy\_option** is the native parameter, while **option** is the parameter imported by a compatible foreign table. + >4. In the syntax, **FIXED FORMATTER \(\{column\_name\(offset, length\)\} \[, ...\]\)** and **\[\(option \[, ...\]\) | copy\_option \[...\]\]** can be in any sequence. + + The syntax of the optional parameter **option** is as follows: + + ``` + FORMAT 'format_name' + | OIDS [ boolean ] + | DELIMITER 'delimiter_character' + | NULL 'null_string' + | HEADER [ boolean ] + | FILEHEADER 'header_file_string' + | FREEZE [ boolean ] + | QUOTE 'quote_character' + | ESCAPE 'escape_character' + | EOL 'newline_character' + | NOESCAPING [ boolean ] + | FORCE_QUOTE { ( column_name [, ...] ) | * } + | FORCE_NOT_NULL ( column_name [, ...] ) + | ENCODING 'encoding_name' + | IGNORE_EXTRA_DATA [ boolean ] + | FILL_MISSING_FIELDS [ boolean ] + | COMPATIBLE_ILLEGAL_CHARS [ boolean ] + | DATE_FORMAT 'date_format_string' + | TIME_FORMAT 'time_format_string' + | TIMESTAMP_FORMAT 'timestamp_format_string' + | SMALLDATETIME_FORMAT 'smalldatetime_format_string' + ``` + + The syntax of the optional parameter **copy\_option** is as follows: + + ``` + OIDS + | NULL 'null_string' + | HEADER + | FILEHEADER 'header_file_string' + | FREEZE + | FORCE_NOT_NULL column_name [, ...] + | FORCE_QUOTE { column_name [, ...] | * } + | BINARY + | CSV + | QUOTE [ AS ] 'quote_character' + | ESCAPE [ AS ] 'escape_character' + | EOL 'newline_character' + | ENCODING 'encoding_name' + | IGNORE_EXTRA_DATA + | FILL_MISSING_FIELDS + | COMPATIBLE_ILLEGAL_CHARS + | DATE_FORMAT 'date_format_string' + | TIME_FORMAT 'time_format_string' + | TIMESTAMP_FORMAT 'timestamp_format_string' + | SMALLDATETIME_FORMAT 'smalldatetime_format_string' + ``` + + +## Parameter Description + +- **query** + + Specifies that the results are to be copied. + + Value range: a **SELECT** or **VALUES** command in parentheses + +- **table\_name** + + Specifies the name \(possibly schema-qualified\) of an existing table. 
+ + Value range: an existing table name + +- **column\_name** + + Specifies an optional list of columns to be copied. + + Value range: any columns. All columns will be copied if no column list is specified. + +- **STDIN** + + Specifies that input comes from the standard input. + +- **STDOUT** + + Specifies that output goes to the standard output. + +- **FIXED** + + Fixes column length. When the column length is fixed, **DELIMITER**, **NULL**, and **CSV** cannot be specified. When **FIXED** is specified, **BINARY**, **CSV**, and **TEXT** cannot be specified by **option** or **copy\_option**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The definition of fixed length is as follows: + >1. The column length of each record is the same. + >2. Spaces are used for column padding. Columns of the numeric type are left-aligned and columns of the string type are right-aligned. + >3. No delimiters are used between columns. + +- **\[USING\] DELIMITER 'delimiters'** + + The string that separates columns within each row \(line\) of the file, and it cannot be larger than 10 bytes. + + Value range: The delimiter cannot include any of the following characters: \\.abcdefghijklmnopqrstuvwxyz0123456789 + + The default value is a tab character in text format and a comma in CSV format. + +- **WITHOUT ESCAPING** + + Specifies, in text format, whether to escape the backslash \(\\\) and its following characters. + + Value range: text only + +- **LOG ERRORS** + + If this parameter is specified, the error tolerance mechanism for data type errors in the **COPY FROM** statement is enabled. Row errors are recorded in the **public.pgxc\_copy\_error\_log** table in the database for future reference. + + Value range: a value set while data is imported using **COPY FROM**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The restrictions of this error tolerance parameter are as follows: + >- This error tolerance mechanism captures only the data type errors \(DATA\_EXCEPTION\) that occur during data parsing of **COPY FROM** on the primary node of the database. + >- Before enabling error tolerance for **COPY FROM** for the first time in a database, check whether the **public.pgxc\_copy\_error\_log** table exists. If not, call the **copy\_error\_log\_create\(\)** function to create it. If it does, copy its data elsewhere, delete it, and call the **copy\_error\_log\_create\(\)** function to create the table. For details about columns in the **public.pgxc\_copy\_error\_log** table, see [Table 1](other-functions.md#en-us_topic_0237121997_table138318280213). + >- While a **COPY FROM** statement with specified **LOG ERRORS** is being executed, if **public.pgxc\_copy\_error\_log** does not exist or does not have the table definitions compliant with those predefined in **copy\_error\_log\_create\(\)**, an error will be reported. Ensure that the error table is created using the **copy\_error\_log\_create\(\)** function. Otherwise, **COPY FROM** statements with error tolerance may fail to be run. + >- If existing error tolerance parameters \(for example, **IGNORE\_EXTRA\_DATA**\) of the **COPY** statement are enabled, the error of the corresponding type will be processed as specified by the parameters and no error will be reported. Therefore, the error table does not contain such error data. + +- **LOG ERRORS DATA** + + The differences between **LOG ERRORS DATA** and **LOG ERRORS** are as follows: + + 1. **LOG ERRORS DATA** fills the **rawrecord** field in the error tolerance table. + 2. 
Only users with the super permission can use the **LOG ERRORS DATA** parameter. + + >![](public_sys-resources/icon-caution.gif) **CAUTION:** + >If error content is too complex, it may fail to be written to the error tolerance table by using **LOG ERRORS DATA**, causing a task failure. + + +- **REJECT LIMIT **'**limit'** + + Used with the **LOG ERROR** parameter to set the upper limit of the tolerated errors in the **COPY FROM** statement. If the number of errors exceeds the limit, later errors will be reported based on the original mechanism. + + Value range: a positive integer \(1 to _INTMAX_\) or **unlimited** + + Default value: If **LOG ERRORS** is not specified, an error will be reported. If **LOG ERRORS** is specified, the default value is **0**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >In the error tolerance mechanism described in the description of **LOG ERRORS**, the count of **REJECT LIMIT** is calculated based on the number of data parsing errors on the primary node of the database where the **COPY FROM** statement is executed, not based on the number of all errors on the primary node. + +- **FORMATTER** + + Defines the place of each column in the data file in fixed length mode. Defines the place of each column in the data file in the **column\(**_offset_,_length_**\)** format. + + Value range: + + - The value of **offset** must be larger than 0. The unit is byte. + - The value of **length** must be larger than 0. The unit is byte. + + The total length of all columns must be less than 1 GB. + + Replace columns that are not in the file with null. + +- **OPTION \{ option\_name ' value ' \}** + + Specifies all types of parameters of a compatible foreign table. + + - FORMAT + + Specifies the format of the source data file in the foreign table. + + Value range: **CSV**, **TEXT**, **FIXED**, and **BINARY** + + - The CSV file can process newline characters efficiently, but cannot process certain special characters well. + - The TEXT file can process certain special characters efficiently, but cannot process newline characters well. + - In FIXED files, the column length of each record is the same. Spaces are used for padding, and the excessive part will be truncated. + - All data in the BINARY file is stored/read as binary format rather than as text. It is faster than the text and CSV formats, but a binary-format file is less portable. + + Default value: **TEXT** + + - DELIMITER + + Specifies the character that separates columns within each row \(line\) of the file. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The value of **DELIMITER** cannot be **\\r** or **\\n**. + >- A delimiter cannot be the same as the null value. The delimiter for the CSV format cannot be same as the **quote** value. + >- The delimiter for the TEXT format data cannot contain lowercase letters, digits, or special characters \(.\\\). + >- The data length of a single row should be less than 1 GB. A row that has many columns using long delimiters cannot contain much valid data. + >- You are advised to use multi-character delimiters or invisible delimiters. For example, you can use multi-characters \(such as $^&\) and invisible characters \(such as 0x07, 0x08, and 0x1b\). + + Value range: a multi-character delimiter within 10 bytes + + Default value: + + - A tab character in TEXT format + - A comma \(,\) in CSV format + - No delimiter in FIXED format + + - NULL + + Specifies the string that represents a null value. + + Value range: + + - A null value cannot be **\\r** or **\\n**. 
The maximum length is 100 characters. + - A null value cannot be the same as the **DELIMITER** or **QUOTE** value. + + Default value: + + - The default value for the CSV format is an empty string without quotation marks. + - The default value for the TEXT format is **\\N**. + + - HEADER + + Specifies whether a file contains a header with the names of each column in the file. **header** is available only for CSV and FIXED files. + + When data is imported, if **header** is **on**, the first row of the data file will be identified as the header and ignored. If **header** is **off**, the first row will be identified as a data row. + + When data is exported, if **header** is **on**, **fileheader** must be specified. If **header** is **off**, an exported file does not contain a header. + + Value range: **true/on** and **false/off** + + Default value: **false** + + - QUOTE + + Specifies a quoted character string for a CSV file. + + Default value: single quotation marks \(''\) + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The value of **QUOTE** cannot be the same as that of **DELIMITER** or **NULL**. + >- The value of **QUOTE** must be a single-byte character. + >- Invisible characters are recommended, such as 0x07, 0x08, and 0x1b. + + - ESCAPE + + Specifies an escape character for a CSV file. The value must be a single-byte character. + + Default value: single quotation marks \(''\) If the value is the same as that of **QUOTE**, it will be replaced by **\\0**. + + - EOL 'newline\_character' + + Specifies the newline character style of the imported or exported data file. + + Value range: multi-character newline characters within 10 bytes Common newline characters include **\\r** \(0x0D\), **\\n** \(0x0A\), and **\\r\\n **\(0x0D0A\). Special newline characters include **$** and **\#**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The EOL parameter supports only the TEXT format for data import and export and does not support the CSV or FIXED format for data import. For forward compatibility, the EOL parameter can be set to **0x0D** or **0x0D0A** for data export in the CSV or FIXED format. + >- The value of **EOL** cannot be the same as that of **DELIMITER** or **NULL**. + >- The EOL parameter value cannot contain the following characters: .abcdefghijklmnopqrstuvwxyz0123456789. + + - FORCE\_QUOTE \{ \( column\_name \[, ...\] \) | \* \} + + Forces quotation marks to be used for all non-null values in each specified column, in **CSV COPY TO** mode. Null values are not quoted. + + Value range: an existing column name + + - FORCE\_NOT\_NULL \( column\_name \[, ...\] \) + + Assigns a value to a specified column in **CSV COPY FROM** mode. + + Value range: an existing column name + + - ENCODING + + Specifies that the file is encoded in the **encoding\_name**. If this option is omitted, the current encoding format is used by default. + + - IGNORE\_EXTRA\_DATA + + Specifies whether to ignore excessive columns when the number of data source files exceeds the number of foreign table columns. This parameter is used only during data import. + + Value range: **true/on** and **false/off** + + - **true/on**: If the number of columns in a data source file is greater than that defined by the foreign table, the extra columns at the end of a row are ignored. 
+ - **false/off**: If the number of columns in a data source file is greater than that defined by the foreign table, the following error message is reported: + + ``` + extra data after last expected column + ``` + + Default value: **false** + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >If a newline character at the end of a row is missing and the row and another row are integrated into one, data in another row is ignored after the parameter is set to **true**. + + - COMPATIBLE\_ILLEGAL\_CHARS + + Specifies whether to tolerate invalid characters during data import. The parameter is valid only for data import using **COPY FROM**. + + Value range: **true/on** and **false/off** + + - **true/on**: No error message is reported and data import is not interrupted when there are invalid characters. Invalid characters are converted into valid ones, and then imported to the database. + - **false/off**: An error occurs when there are invalid characters, and the import stops. + + Default value: **false/off** + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The rules for converting invalid characters are as follows: + >1. **\\0** is converted to a space. + >2. Other invalid characters are converted to question marks. + >3. When **compatible\_illegal\_chars** is set to **true/on**, after invalid characters such as **NULL**, **DELIMITER**, **QUOTE**, and **ESCAPE** are converted to spaces or question marks, an error message stating "illegal chars conversion may confuse COPY escape 0x20" will be displayed to remind you of possible parameter confusion caused by the conversion. + + - FILL\_MISSING\_FIELDS + + Specifies how to handle the problem that the last column of a row in a source data file is lost during data import. + + Value range: **true/on** and **false/off** + + Default value: **false/off** + + - DATE\_FORMAT + + Specifies the DATE format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid DATE value For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >You can use the **TIMESTAMP\_FORMAT** parameter to set the DATE format to **TIMESTAMP** for data import. For details, see **TIMESTAMP\_FORMAT** below. + + - TIME\_FORMAT + + Specifies the TIME format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid TIME value. Time zones are not supported. For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + - TIMESTAMP\_FORMAT + + Specifies the TIMESTAMP format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid TIMESTAMP value. Time zones cannot be used. For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + - SMALLDATETIME\_FORMAT + + Specifies the SMALLDATETIME format for data import. 
The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid SMALLDATETIME value For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + +- **COPY\_OPTION \{ option\_name ' value ' \}** + + Specifies all types of native parameters of **COPY**. + + - NULL null\_string + + Specifies the string that represents a null value. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >When using **COPY FROM**, any data item that matches this string will be stored as a null value, so make sure that you use the same string as you used with **COPY TO**. + + Value range: + + - A null value cannot be **\\r** or **\\n**. The maximum length is 100 characters. + - A null value cannot be the same as the **DELIMITER** or **QUOTE** value. + + Default value: + + - The default value for the TEXT format is **\\N**. + - The default value for the CSV format is an empty string without quotation marks. + + - HEADER + + Specifies whether a file contains a header with the names of each column in the file. **header** is available only for CSV and FIXED files. + + When data is imported, if **header** is **on**, the first row of the data file will be identified as the header and ignored. If **header** is **off**, the first row will be identified as a data row. + + When data is exported, if **header** is **on**, **fileheader** must be specified. If **header** is **off**, an exported file does not contain a header. + + - FILEHEADER + + Specifies a file that defines the content in the header for exported data. The file contains data description of each column. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- This parameter is available only when **header** is **on** or **true**. + >- **fileheader** specifies an absolute path. + >- The file can contain only one row of header information, and ends with a newline character. Excess rows will be discarded. \(Header information cannot contain newline characters.\) + >- The length of the file including the newline character cannot exceed 1 MB. + + - FREEZE + + Sets the **COPY** loaded data row as **frozen**, like these data have executed **VACUUM FREEZE**. + + This is a performance option of initial data loading. The data will be frozen only when the following three requirements are met: + + - The table being loaded has been created or truncated in the current subtransaction before copying. + - There are no cursors open in the current transaction. + - There are no original snapshots in the current transaction. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >When **COPY** is completed, all the other sessions will see the data immediately. However, this violates the general principle of MVCC visibility, and users should understand that this may cause potential risks. + + - FORCE NOT NULL column\_name \[, ...\] + + In **CSV COPY FROM** mode, the specified column is not null. If the column is null, its value is regarded as a string of 0 characters. + + Value range: an existing column name + + - FORCE QUOTE \{ column\_name \[, ...\] | \* \} + + Forces quotation marks to be used for all non-null values in each specified column, in **CSV COPY TO** mode. Null values are not quoted. 
+ + Value range: an existing column name + + - BINARY + + Specifies that data is stored and read in binary mode instead of text mode. In binary mode, you cannot declare **DELIMITER**, **NULL**, or **CSV**. When **BINARY** is specified, **CSV**, **FIXED**, and **TEXT** cannot be specified through **option** or **copy\_option**. + + - CSV + + Enables the CSV mode. When **CSV** is specified, **BINARY**, **FIXED**, and **TEXT** cannot be specified through **option** or **copy\_option**. + + - QUOTE \[AS\] 'quote\_character' + + Specifies a quoted character string for a CSV file. + + Default value: single quotation marks \(''\) + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The value of **QUOTE** cannot be the same as that of **DELIMITER** or **NULL**. + >- The value of **QUOTE** must be a single-byte character. + >- Invisible characters are recommended, such as 0x07, 0x08, and 0x1b. + + - ESCAPE \[AS\] 'escape\_character' + + Specifies an escape character for a CSV file. The value must be a single-byte character. + + The default value is single quotation marks \(''\). If the value is the same as that of **QUOTE**, it will be replaced by **\\0**. + + - EOL 'newline\_character' + + Specifies the newline character style of the imported or exported data file. + + Value range: multi-character newline characters within 10 bytes Common newline characters include **\\r** \(0x0D\), **\\n** \(0x0A\), and **\\r\\n **\(0x0D0A\). Special newline characters include **$** and **\#**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- The **EOL** parameter supports only the TEXT format for data import and export and does not support the CSV or FIXED format. For forward compatibility, the EOL parameter can be set to **0x0D** or **0x0D0A** for data export in the CSV or FIXED format. + >- The value of **EOL** cannot be the same as that of **DELIMITER** or **NULL**. + >- The EOL parameter value cannot contain the following characters: .abcdefghijklmnopqrstuvwxyz0123456789. + + - ENCODING 'encoding\_name' + + Specifies the name of a file encoding format. + + Value range: a valid encoding format + + Default value: current encoding format + + - IGNORE\_EXTRA\_DATA + + If the number of columns in a data source file is greater than that defined by the foreign table, the extra columns at the end of a row are ignored. This parameter is used only during data import. + + If this parameter is not used and the number of columns in the data source file is greater than that defined in the foreign table, the following error information is displayed: + + ``` + extra data after last expected column + ``` + + - COMPATIBLE\_ILLEGAL\_CHARS + + Specifies that invalid characters are tolerated during data import. Invalid characters are converted and then imported to the database. No error is reported and the import is not interrupted. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + If this parameter is not used, an error is reported when invalid characters are encountered during the import, and the import is interrupted. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The rules for converting invalid characters are as follows: + >1. **\\0** is converted to a space. + >2. Other invalid characters are converted to question marks. + >3. 
When **compatible\_illegal\_chars** is set to **true/on**, after invalid characters such as **NULL**, **DELIMITER**, **QUOTE**, and **ESCAPE** are converted to spaces or question marks, an error message stating "illegal chars conversion may confuse COPY escape 0x20" will be displayed to remind you of possible parameter confusion caused by the conversion. + + - FILL\_MISSING\_FIELDS + + Specifies how to handle the problem that the last column of a row in a source data file is lost during data import. + + Value range: **true/on** and **false/off** + + Default value: **false/off** + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >Do not specify this option. Currently, it does not enable error tolerance, but will make the parser ignore the said errors during data parsing on the primary node of the database. Such errors will not be recorded in the COPY error table \(enabled using **LOG ERRORS REJECT LIMIT**\) but will be reported later by the database node. Therefore, do not specify this option. + + - DATE\_FORMAT 'date\_format\_string' + + Specifies the DATE format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid DATE value For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >You can use the **TIMESTAMP\_FORMAT** parameter to set the DATE format to **TIMESTAMP** for data import. For details, see **TIMESTAMP\_FORMAT** below. + + - TIME\_FORMAT 'time\_format\_string' + + Specifies the TIME format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid TIME value. Time zones are not supported. For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + - TIMESTAMP\_FORMAT 'timestamp\_format\_string' + + Specifies the TIMESTAMP format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid TIMESTAMP value. Time zones cannot be used. For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + + - SMALLDATETIME\_FORMAT 'smalldatetime\_format\_string' + + Specifies the SMALLDATETIME format for data import. The BINARY format is not supported. When data of such format is imported, error "cannot specify bulkload compatibility options in BINARY mode" will occur. The parameter is valid only for data import using **COPY FROM**. + + Value range: a valid SMALLDATETIME value For details, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). 
+ + The following special backslash sequences are recognized by **COPY FROM**: + + - **\\b**: Backslash \(ASCII 8\) + - **\\f**: Form feed \(ASCII 12\) + - **\\n**: Newline character \(ASCII 10\) + - **\\r**: Carriage return character \(ASCII 13\) + - **\\t**: Tab \(ASCII 9\) + - **\\v**: Vertical tab \(ASCII 11\) + - **\\digits**: Backslash followed by one to three octal digits specifies that the ASCII value is the character with that numeric code. + - **\\xdigits**: Backslash followed by an x and one or two hex digits specifies the character with that numeric code. + + +## Examples + +``` +-- Copy data from the tpcds.ship_mode file to the /home/omm/ds_ship_mode.dat file: +postgres=# COPY tpcds.ship_mode TO '/home/omm/ds_ship_mode.dat'; + +-- Output tpcds.ship_mode to stdout. +postgres=# COPY tpcds.ship_mode TO stdout; + +-- Create the tpcds.ship_mode_t1 table. +postgres=# CREATE TABLE tpcds.ship_mode_t1 +( + SM_SHIP_MODE_SK INTEGER NOT NULL, + SM_SHIP_MODE_ID CHAR(16) NOT NULL, + SM_TYPE CHAR(30) , + SM_CODE CHAR(10) , + SM_CARRIER CHAR(20) , + SM_CONTRACT CHAR(20) +) +WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE) +; + +-- Copy data from stdin to the tpcds.ship_mode_t1 table. +postgres=# COPY tpcds.ship_mode_t1 FROM stdin; + +-- Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table. +postgres=# COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat'; + +-- Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table, with the import format set to TEXT (format 'text'), the delimiter set to \t' (delimiter E'\t'), excessive columns ignored (ignore_extra_data 'true'), and characters not escaped (noescaping 'true'). +postgres=# COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat' WITH(format 'text', delimiter E'\t', ignore_extra_data 'true', noescaping 'true'); + +-- Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table, with the import format set to FIXED, fixed-length format specified (FORMATTER(SM_SHIP_MODE_SK(0, 2), SM_SHIP_MODE_ID(2,16), SM_TYPE(18,30), SM_CODE(50,10), SM_CARRIER(61,20), SM_CONTRACT(82,20))), excessive columns ignored (ignore_extra_data), and headers included (header). +postgres=# COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat' FIXED FORMATTER(SM_SHIP_MODE_SK(0, 2), SM_SHIP_MODE_ID(2,16), SM_TYPE(18,30), SM_CODE(50,10), SM_CARRIER(61,20), SM_CONTRACT(82,20)) header ignore_extra_data; + +-- Delete the tpcds.ship_mode_t1 table: +postgres=# DROP TABLE tpcds.ship_mode_t1; +``` + diff --git a/content/en/docs/Developerguide/copymanager.md b/content/en/docs/Developerguide/copymanager.md new file mode 100644 index 000000000..dae6642ad --- /dev/null +++ b/content/en/docs/Developerguide/copymanager.md @@ -0,0 +1,109 @@ +# CopyManager + +CopyManager is an API class provided by the JDBC driver in openGauss. It is used to import data to openGauss in batches. + +## Inheritance Relationship of CopyManager + +The CopyManager class is in the **org.postgresql.copy** package and inherits the java.lang.Object class. The declaration of the class is as follows: + +``` +public class CopyManager +extends Object +``` + +## Construction Method + +public CopyManager\(BaseConnection connection\) + +throws SQLException + +## Common Methods + +**Table 1** Common methods of CopyManager + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+| Return Value | Method | Description | Throws |
+| --- | --- | --- | --- |
+| CopyIn | copyIn(String sql) | - | SQLException |
+| long | copyIn(String sql, InputStream from) | Uses COPY FROM STDIN to quickly load data to tables in the database from InputStream. | SQLException, IOException |
+| long | copyIn(String sql, InputStream from, int bufferSize) | Uses COPY FROM STDIN to quickly load data to tables in the database from InputStream. | SQLException, IOException |
+| long | copyIn(String sql, Reader from) | Uses COPY FROM STDIN to quickly load data to tables in the database from Reader. | SQLException, IOException |
+| long | copyIn(String sql, Reader from, int bufferSize) | Uses COPY FROM STDIN to quickly load data to tables in the database from Reader. | SQLException, IOException |
+| CopyOut | copyOut(String sql) | - | SQLException |
+| long | copyOut(String sql, OutputStream to) | Sends the result set of COPY TO STDOUT from the database to the OutputStream class. | SQLException, IOException |
+| long | copyOut(String sql, Writer to) | Sends the result set of COPY TO STDOUT from the database to the Writer class. | SQLException, IOException |
+
+ diff --git a/content/en/docs/Developerguide/core-dump-occurs-due-to-full-disk-space.md b/content/en/docs/Developerguide/core-dump-occurs-due-to-full-disk-space.md new file mode 100644 index 000000000..f9a4e31c9 --- /dev/null +++ b/content/en/docs/Developerguide/core-dump-occurs-due-to-full-disk-space.md @@ -0,0 +1,16 @@ +# Core Dump Occurs due to Full Disk Space + +## Symptom + +When TPC-C is running, the disk space is full during injection. As a result, a core dump occurs on the GaussDB process, as shown in the following figure. + +![](figures/en-us_image_0244851037.png) + +## Cause Analysis + +When the disk is full, Xlog logs cannot be written. The program exits through the panic log. + +## Procedure + +Externally monitor the disk usage and periodically clean up the disk. + diff --git a/content/en/docs/Developerguide/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log_directory.md b/content/en/docs/Developerguide/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log_directory.md new file mode 100644 index 000000000..9da3d8c42 --- /dev/null +++ b/content/en/docs/Developerguide/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log_directory.md @@ -0,0 +1,14 @@ +# Core Dump Occurs Due to Incorrect Settings of GUC Parameter log\_directory + +## Symptom + +After the database process is started, a core dump occurs and no log is recorded. + +## Cause Analysis + +The directory specified by GUC parameter **log\_directory** cannot be read or you do not have permissions to access this directory. As a result, the verification fails during the database startup, and the program exits through the panic log. + +## Procedure + +Set **log\_directory** to a valid directory. For details, see [log\_directory](logging-destination.md#en-us_topic_0237124721_en-us_topic_0059778787_sfbedf09fcf1a4223a4538679f80f12a9). + diff --git a/content/en/docs/Developerguide/core-fault-locating.md b/content/en/docs/Developerguide/core-fault-locating.md new file mode 100644 index 000000000..205ca26af --- /dev/null +++ b/content/en/docs/Developerguide/core-fault-locating.md @@ -0,0 +1,7 @@ +# Core Fault Locating + +- **[Core Dump Occurs due to Full Disk Space](core-dump-occurs-due-to-full-disk-space.md)** + +- **[Core Dump Occurs Due to Incorrect Settings of GUC Parameter log\_directory](core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log_directory.md)** + + diff --git a/content/en/docs/Developerguide/cost-based-vacuum-delay.md b/content/en/docs/Developerguide/cost-based-vacuum-delay.md new file mode 100644 index 000000000..200b18535 --- /dev/null +++ b/content/en/docs/Developerguide/cost-based-vacuum-delay.md @@ -0,0 +1,69 @@ +# Cost-based Vacuum Delay + +This feature allows administrators to reduce the I/O impact of the **VACUUM** and **ANALYZE** statements on concurrent database activities. It is often more important to prevent maintenance statements, such as **VACUUM** and **ANALYZE**, from affecting other database operations than to run them quickly. Cost-based vacuum delay provides a way for administrators to achieve this purpose. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Certain vacuum operations hold critical locks and should be complete as quickly as possible. In openGauss, cost-based vacuum delays do not take effect during such operations. 
To avoid uselessly long delays in such cases, the actual delay is the larger of the two calculated values: +>- **vacuum\_cost\_delay** x **accumulated\_balance**/**vacuum\_cost\_limit** +>- **vacuum\_cost\_delay** x 4 + +## Background + +During the execution of the [ANALYZE | ANALYSE](analyze-analyse.md) and [VACUUM](vacuum.md) statements, the system maintains an internal counter that keeps track of the estimated cost of the various I/O operations that are performed. When the accumulated cost reaches a limit \(specified by **vacuum\_cost\_limit**\), the process performing the operation will sleep for a short period of time \(specified by **vacuum\_cost\_delay**\). Then, the counter resets and the operation continues. + +By default, this feature is disabled. To enable this feature, set **vacuum\_cost\_delay** to a positive value. + +## vacuum\_cost\_delay + +**Parameter description**: Specifies the length of time that a process will sleep when **vacuum\_cost\_limit** has been exceeded. + +In many systems, the effective resolution of the sleep length is 10 milliseconds. Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10. + +This parameter is usually set to a small value, such as 10 or 20 milliseconds. Adjusting vacuum's resource consumption is best done by changing other vacuum cost parameters. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 100 \(unit: ms\). A positive number enables cost-based vacuum delay and **0** disables cost-based vacuum delay. + +**Default value**: **0** + +## vacuum\_cost\_page\_hit + +**Parameter description**: Specifies the estimated cost for vacuuming a buffer found in the shared buffer. It represents the cost to lock the buffer pool, look up the shared hash table, and scan the content of the page. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 10000 + +**Default value**: **1** + +## vacuum\_cost\_page\_miss + +**Parameter description**: Specifies the estimated cost for vacuuming a buffer read from the disk. It represents the cost to lock the buffer pool, look up the shared hash table, read the desired block from the disk, and scan the block. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 10000 + +**Default value:** **10** + +## vacuum\_cost\_page\_dirty + +**Parameter description**: Specifies the estimated cost charged when vacuum modifies a block that was previously clean. It represents the extra I/O required to flush the dirty block out to disk again. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 1000 + +**Default value:** **20** + +## vacuum\_cost\_limit + +**Parameter description**: Specifies the cost limit. The vacuuming process will sleep if this limit is exceeded. 
+ +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 1 to 10000 + +**Default value**: **200** + diff --git a/content/en/docs/Developerguide/cpu.md b/content/en/docs/Developerguide/cpu.md new file mode 100644 index 000000000..3c45dd6f2 --- /dev/null +++ b/content/en/docs/Developerguide/cpu.md @@ -0,0 +1,117 @@ +# CPU + +You can run the **top** command to check the CPU usage of each node in openGauss and analyze whether performance bottleneck caused by heavy CPU load exists. + +## Checking CPU Usage + +You can query the CPU usage of the server in the following ways: + +On each storage node, run the **top** command to check the CPU usage. Then, press **1** to view the usage of each CPU core. + +``` +top - 17:05:04 up 32 days, 20:34, 5 users, load average: 0.02, 0.02, 0.00 +Tasks: 124 total, 1 running, 123 sleeping, 0 stopped, 0 zombie +Cpu0 : 0.0%us, 0.3%sy, 0.0%ni, 69.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Cpu1 : 0.3%us, 0.3%sy, 0.0%ni, 69.3%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Cpu2 : 0.3%us, 0.3%sy, 0.0%ni, 69.3%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Cpu3 : 0.3%us, 0.3%sy, 0.0%ni, 69.3%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Mem: 8038844k total, 7165272k used, 873572k free, 530444k buffers +Swap: 4192924k total, 4920k used, 4188004k free, 4742904k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + + 35184 omm 20 0 822m 421m 128m S 0 5.4 5:28.15 gaussdb + 1 root 20 0 13592 820 784 S 0 0.0 1:16.62 init +``` + +In the command output, focus on the CPU usage occupied by each process. + +**us** indicates the CPU percentage occupied by the user space, **sy** indicates the CPU percentage occupied by the kernel space, and **id** indicates the idle CPU percentage. If **id** is less than 10%, the CPU load is high. In this case, you can reduce the CPU load by reducing the number of tasks on nodes. + +## Analyzing Performance Parameters + +1. Run the **top-H** command to check the CPU usage. The following is displayed: + + ``` + 14 root 20 0 0 0 0 S 0 0.0 0:16.41 events/3 + top - 14:22:49 up 5 days, 21:51, 2 users, load average: 0.08, 0.08, 0.06 + Tasks: 312 total, 1 running, 311 sleeping, 0 stopped, 0 zombie + Cpu(s): 1.3%us, 0.7%sy, 0.0%ni, 95.0%id, 2.4%wa, 0.5%hi, 0.2%si, 0.0%st + Mem: 8038844k total, 5317668k used, 2721176k free, 180268k buffers + Swap: 4192924k total, 0k used, 4192924k free, 2886860k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + + 3105 root 20 0 50492 11m 2708 S 3 0.1 22:22.56 acc-snf + + 4015 gdm 20 0 232m 23m 11m S 0 0.3 11:34.70 gdm-simple-gree + 51001 omm 20 0 12140 1484 948 R 0 0.0 0:00.94 top + + 54885 omm 20 0 615m 396m 116m S 0 5.1 0:09.44 gaussdb + + + 1 root 20 0 13592 944 792 S 0 0.0 0:08.54 init + ``` + +2. In the query result for **Cpu\(s\)**, check whether the system CPU \(**sy**\) or user CPU \(**us**\) usage is high. + - If the system CPU usage is too high, you need to identify the abnormal system processes and handle them. + - If the CPU usage of the openGauss process whose **USER** is **omm** is too high, optimize the service-related SQL statements based on the running services queries. Based on the features of the currently running service, perform the following operations to check whether this process containing infinite loop logics. + 1. Run the **top -H -p pid** command to identify the threads that use much CPU in the process. 
+ + ``` + top -H -p 54952 + ``` + + The threads causing high CPU usage are displayed in the **top** column of the command output. In this section, thread **54775** is used as an example for analyzing the causes of the high CPU usage. + + ``` + top - 14:23:27 up 5 days, 21:52, 2 users, load average: 0.04, 0.07, 0.05 + Tasks: 13 total, 0 running, 13 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.9%us, 0.4%sy, 0.0%ni, 97.3%id, 1.1%wa, 0.2%hi, 0.1%si, 0.0%st + Mem: 8038844k total, 5322180k used, 2716664k free, 180316k buffers + Swap: 4192924k total, 0k used, 4192924k free, 2889860k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 54775 omm 20 0 684m 424m 131m S 0 5.4 0:00.32 gaussdb + 54951 omm 20 0 684m 424m 131m S 0 5.4 0:00.84 gaussdb + 54732 omm 20 0 684m 424m 131m S 0 5.4 0:00.24 gaussdb + 54758 omm 20 0 684m 424m 131m S 0 5.4 0:00.00 gaussdb + 54759 omm 20 0 684m 424m 131m S 0 5.4 0:00.02 gaussdb + 54773 omm 20 0 684m 424m 131m S 0 5.4 0:02.79 gaussdb + 54780 omm 20 0 684m 424m 131m S 0 5.4 0:00.04 gaussdb + 54781 omm 20 0 684m 424m 131m S 0 5.4 0:00.21 gaussdb + 54782 omm 20 0 684m 424m 131m S 0 5.4 0:00.02 gaussdb + 54798 omm 20 0 684m 424m 131m S 0 5.4 0:16.70 gaussdb + 54952 omm 20 0 684m 424m 131m S 0 5.4 0:07.51 gaussdb + 54953 omm 20 0 684m 424m 131m S 0 5.4 0:00.81 gaussdb + 54954 omm 20 0 684m 424m 131m S 0 5.4 0:06.54 gaussdb + ``` + + 2. Run the following command to view the function invocation stack for each thread in the process. Check the thread number for the ID of the thread that occupies high CPU usage in the last step. + + ``` + gstack 54954 + ``` + + The query result is as follows. The thread number for the thread ID **54775** is **10**. + + ``` + 192.168.0.11:~ # gstack 54954 + Thread 10 (Thread 0x7f95a5fff710 (LWP 54775)): + #0 0x00007f95c41d63c6 in poll () from /lib64/libc.so.6 + #1 0x0000000000d3d2d3 in WaitLatchOrSocket(Latch volatile*, int, int, long) () + #2 0x000000000095ed25 in XLogPageRead(XLogRecPtr*, int, bool, bool) () + #3 0x000000000095f6dd in ReadRecord(XLogRecPtr*, int, bool) () + #4 0x000000000096aef0 in StartupXLOG() () + #5 0x0000000000d5607a in StartupProcessMain() () + #6 0x00000000009e19f9 in AuxiliaryProcessMain(int, char**) () + #7 0x0000000000d50135 in SubPostmasterMain(int, char**) () + #8 0x0000000000d504ec in MainStarterThreadFunc(void*) () + #9 0x00007f95c79b85f0 in start_thread () from /lib64/libpthread.so.0 + #10 0x00007f95c41df84d in clone () from /lib64/libc.so.6 + #11 0x0000000000000000 in ?? () + ``` + + + + diff --git a/content/en/docs/Developerguide/create-data-source.md b/content/en/docs/Developerguide/create-data-source.md new file mode 100644 index 000000000..25ea3dad1 --- /dev/null +++ b/content/en/docs/Developerguide/create-data-source.md @@ -0,0 +1,92 @@ +# CREATE DATA SOURCE + +## Function + +**CREATE DATA SOURCE** creates an external data source, which defines the information about the database that openGauss will connect to. + +## Precautions + +- The data source name must be unique in the database and comply with the identifier naming rules. Its length cannot exceed 63 bytes. Otherwise, it will be truncated. +- Only the system administrator or initial user has the permission to create data sources. The user who creates the object is the default owner of the object. +- If the **password** option is displayed, ensure that the **datasource.key.cipher** and **datasource.key.rand** files exist in the _$GAUSSHOME_**/bin** directory of each node in openGauss. 
If the two files do not exist, use the **gs\_guc** tool to generate them and use the **gs\_ssh** tool to release them to the _$GAUSSHOME_**/bin** directory on each node in openGauss. + +## Syntax + +``` +CREATE DATA SOURCE src_name + [TYPE 'type_str'] + [VERSION {'version_str' | NULL}] + [OPTIONS (optname 'optvalue' [, ...])]; +``` + +## Parameter Description + +- **src\_name** + + Specifies the name of the new data source, which must be unique in the database. + + Value range: a string. It must comply with the naming convention rule. + +- **TYPE** + + Specifies the type of the data source. This parameter can be left empty, and its default value will be used. + + Value range: an empty string or a non-empty string + +- **VERSION** + + Specifies the version number of the new data source. This parameter can be left empty or set to null. + + Value range: an empty string, a non-empty string, or null + +- **OPTIONS** + + Specifies the options of the data source. This parameter can be left empty or specified using the following keywords: + + - optname + + Specifies the option name. + + Value range: **dsn**, **username**, **password**, and **encoding**. The value is case-insensitive. + + - **dsn** corresponds to the DSN in the ODBC configuration file. + - **username**/**password** indicates the username and password for connecting to the destination database. + + The user name and password entered by the user are encrypted in the openGauss background to ensure security. The key file required for encryption must be generated using the **gs\_guc** tool and released to the _$GAUSSHOME_**/bin** directory of each node in openGauss using the **gs\_ssh** tool. The user name and password cannot contain the prefix "encryptOpt". Otherwise, they are considered as encrypted ciphertext. + + - **encoding** indicates the character string encoding mode used for interaction with the destination database \(including the sent SQL statements and returned data of the character type\). Its validity is not checked during object creation. Whether data can be encoded and decoded depends on whether the encoding you specified can be used in the database. + + - optvalue + + Specifies the option value. + + Value range: an empty string or a non-empty string + + + +## Examples + +``` +-- Create an empty data source that does not contain any information. +postgres=# CREATE DATA SOURCE ds_test1; + +-- Create a data source with TYPE information and VERSION being null. +postgres=# CREATE DATA SOURCE ds_test2 TYPE 'MPPDB' VERSION NULL; + +-- Create a data source that contains only OPTIONS. +postgres=# CREATE DATA SOURCE ds_test3 OPTIONS (dsn 'openGauss', encoding 'utf8'); + +-- Create a data source that contains TYPE, VERSION, and OPTIONS. +postgres=# CREATE DATA SOURCE ds_test4 TYPE 'unknown' VERSION '11.2.3' OPTIONS (dsn 'openGauss', username 'userid', password 'pwd@123456', encoding ''); + +-- Delete the data source. +postgres=# DROP DATA SOURCE ds_test1; +postgres=# DROP DATA SOURCE ds_test2; +postgres=# DROP DATA SOURCE ds_test3; +postgres=# DROP DATA SOURCE ds_test4; +``` + +## Helpful Links + +[ALTER DATA SOURCE](alter-data-source.md), [DROP DATA SOURCE](drop-data-source.md) + diff --git a/content/en/docs/Developerguide/create-database.md b/content/en/docs/Developerguide/create-database.md new file mode 100644 index 000000000..04d63e140 --- /dev/null +++ b/content/en/docs/Developerguide/create-database.md @@ -0,0 +1,170 @@ +# CREATE DATABASE + +## Function + +Create a database. 
By default, a new database is created by copying the standard system database template0. Only template0 can be used to create a new database. + +## Precautions + +- A user that has the **CREATEDB** permission or a system administrator can create a database. +- **CREATE DATABASE** cannot be executed inside a transaction block. +- Errors along the line of "could not initialize database directory" are most likely related to insufficient permissions on the data directory, a full disk, or other file system problems. + +## Syntax + +``` +CREATE DATABASE database_name + [ [ WITH ] { [ OWNER [=] user_name ] | + [ TEMPLATE [=] template ] | + [ ENCODING [=] encoding ] | + [ LC_COLLATE [=] lc_collate ] | + [ LC_CTYPE [=] lc_ctype ] | + [ DBCOMPATIBILITY [=] compatibilty_type ] | + [ TABLESPACE [=] tablespace_name ] | + [ CONNECTION LIMIT [=] connlimit ]}[...] ]; +``` + +## Parameter Description + +- **database\_name** + + Specifies the database name. + + Value range: a string. It must comply with the naming convention. + +- **OWNER \[ = \] user\_name** + + Specifies the owner of the new database. By default, the owner of a new database is the current user. + + Value range: an existing username + +- **TEMPLATE \[ = \] template** + + Specifies a template name. That is, the template from which the database is created. openGauss creates a database by copying data from a template database. openGauss has two default template databases **template0** and **template1** and a default user database **postgres**. + + Value range: template0. + +- **ENCODING \[ = \] encoding** + + Specifies the encoding format used by the new database. The value can be a string \(for example, **SQL\_ASCII**\) or an integer. + + By default, the encoding format of the template database is used. The encoding formats of the template databases **template0** and **template1** depend on the OS. The encoding format of **template1** cannot be changed. If you need to change the encoding format when creating a database, use **template0**. + + Common values : **GBK**, **UTF8**, and **Latin1** + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- The character set encoding of the new database must be compatible with the local settings \(**LC\_COLLATE** and **LC\_CTYPE**\). + >- When the specified character encoding set is **GBK**, some uncommon Chinese characters cannot be directly used as object names. This is because the byte encoding overlaps with the ASCII characters @A-Z\[\\\]^\_\`a-z\{|\} when the second byte of the GBK ranges from 0x40 to 0x7E. **@\[\\\]^\_'\{|\}** is an operator in the database. If it is directly used as an object name, a syntax error will be reported. For example, the GBK hexadecimal code is **0x8240**, and the second byte is **0x40**, which is the same as the ASCII character @. Therefore, the character cannot be used as an object name. If you do need to use this function, you can add double quotation marks \(""\) to avoid this problem when creating and accessing objects. + +- **LC\_COLLATE \[ = \] lc\_collate** + + Specifies the character set used by the new database. For example, set this parameter by using **lc\_collate = 'zh\_CN.gbk'**. + + The use of this parameter affects the sort order of strings \(for example, the order of using **ORDER BY** for execution and the order of using indexes on text columns\). By default, the sorting order of the template database is used. + + Value range: a valid sorting type + +- **LC\_CTYPE \[ = \] lc\_ctype** + + Specifies the character class used by the new database. 
For example, set this parameter by using **lc\_ctype = 'zh\_CN.gbk'**. The use of this parameter affects the classification of characters, such as uppercase letters, lowercase letters, and digits. By default, the character classification of the template database is used. + + Value range: a valid character type + +- **DBCOMPATIBILITY \[ = \] compatibilty\_type** + + Specifies the type of the compatible database. + + **Value range**: A, B, and C , indicating **O**, **MY**, and **TD** databases, respectively. + +- **TABLESPACE \[ = \] tablespace\_name** + + Specifies the tablespace of the database. + + Value range: an existing tablespace name + +- **CONNECTION LIMIT \[ = \] connlimit** + + Specifies the maximum number of concurrent connections that can be made to the new database. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- The system administrator is not restricted by this parameter. + >- connlimit is calculated separately for each master database node. Number of connections of the openGauss = connlimit x Number of normal CN master database nodes. + + Value range: an integer greater than or equal to -1 The default value is **-1**, indicating that there is no limit. + + +The restrictions on character encoding are as follows: + +- If the locale is set to **C** \(or **POSIX**\), all encoding types are allowed. For other locale settings, the character encoding must be the same as that of the locale. +- The encoding and region settings must match the template database, except that **template0** is used as a template. This is because other databases may contain data that does not match the specified encoding, or may contain indexes whose sorting order is affected by **LC\_COLLATE** and **LC\_CTYPE**. Copying this data will invalidate the indexes in the new database. **template0** does not contain any data or indexes that may be affected. + +## Examples + +``` +-- Create users jim and tom: +postgres=# CREATE USER jim PASSWORD 'Bigdata@123'; +postgres=# CREATE USER tom PASSWORD 'Bigdata@123'; + +-- Create database music using GBK (the local encoding type is also GBK): +postgres=# CREATE DATABASE music ENCODING 'GBK' template = template0; + +-- Create database music2 and specify user jim as its owner: +postgres=# CREATE DATABASE music2 OWNER jim; + +-- Create database music3 using template template0 and specify user jim as its owner: +postgres=# CREATE DATABASE music3 OWNER jim TEMPLATE template0; + +-- Set the maximum number of connections to database music to 10: +postgres=# ALTER DATABASE music CONNECTION LIMIT= 10; + +-- Rename database music to music4: +postgres=# ALTER DATABASE music RENAME TO music4; + +-- Change the owner of database music2 to user tom: +postgres=# ALTER DATABASE music2 OWNER TO tom; + +-- Set the tablespace of database music3 to PG_DEFAULT: +postgres=# ALTER DATABASE music3 SET TABLESPACE PG_DEFAULT; + +-- Disable the default index scan on database music3. +postgres=# ALTER DATABASE music3 SET enable_indexscan TO off; + +-- Reset the enable_indexscan parameter. +postgres=# ALTER DATABASE music3 RESET enable_indexscan; + +Delete the databases: +postgres=# DROP DATABASE music2; +postgres=# DROP DATABASE music3; +postgres=# DROP DATABASE music4; + +-- Delete the jim and tom users. +postgres=# DROP USER jim; +postgres=# DROP USER tom; + +-- Create a database compatible with the TD format. +postgres=# CREATE DATABASE td_compatible_db DBCOMPATIBILITY 'C'; + +-- Create a database compatible with the ORA format. 
+postgres=# CREATE DATABASE ora_compatible_db DBCOMPATIBILITY 'A'; + +-- Delete the databases that are compatible with the TD and ORA formats. +postgres=# DROP DATABASE td_compatible_db; +postgres=# DROP DATABASE ora_compatible_db; +``` + +## Helpful Links + +[ALTER DATABASE](alter-database.md) and [DROP DATABASE](drop-database.md) + +## Suggestions + +- **create database** + + Database cannot be created in a transaction. + + +- **ENCODING LC\_COLLATE LC\_CTYPE** + + If the new database Encoding, LC-Collate, or LC\_Ctype does not match the template database \(SQL\_ASCII\) \(**'GBK'**, **'UTF8'**, or **'LATIN1'**\), **template \[=\] template0** must be specified. + + diff --git a/content/en/docs/Developerguide/create-directory.md b/content/en/docs/Developerguide/create-directory.md new file mode 100644 index 000000000..91c3e82c4 --- /dev/null +++ b/content/en/docs/Developerguide/create-directory.md @@ -0,0 +1,55 @@ +# CREATE DIRECTORY + +## Function + +**CREATE DIRECTORY** creates a directory. The directory defines an alias for a path in the server file system and is used to store data files used by users. + +## Precautions + +- By default, only initial users can create directories. If **enable\_access\_server\_directory** is enabled \(for details, see [enable\_access\_server\_directory](operation-auditing.md#en-us_topic_0237124747_section4279164545515)\), users with the **sysadmin** permission can also create directories. +- By default, the user who creates a directory has the read and write permissions on the directory. +- The default owner of a directory is the user who creates the directory. +- A directory cannot be created for the following paths: + - The path contains special characters. + - The path is a relative path. + - The path is a symbolic link. + +- The following validity check is performed during directory creation: + - Check whether the path exists in the OS. If it does not exist, a message is displayed, indicating the potential risks. + - Check whether the database initial user omm has the R/W/X permissions for the OS path. If the user does not have all the permissions, a message is displayed, indicating the potential risks. + +- In openGauss, ensure that the path is the same on all the nodes. Otherwise, the path may fail to be found on some nodes when the directory is used. + +## Syntax + +``` +CREATE [OR REPLACE] DIRECTORY directory_name +AS 'path_name'; +``` + +## Parameter Description + +- **directory\_name** + + Specifies the name of a directory. + + Value range: a string. It must comply with the naming convention rule. + +- **path\_name** + + Specifies the OS path for which a directory is to be created. + + Value range: a valid OS path + + +## Examples + +``` +-- Create a directory. +postgres=# CREATE OR REPLACE DIRECTORY dir as '/tmp/'; +``` + +## Helpful Links + +[ALTER DIRECTORY](alter-directory.md) and [DROP DIRECTORY](drop-directory.md) + diff --git a/content/en/docs/Developerguide/create-function.md b/content/en/docs/Developerguide/create-function.md new file mode 100644 index 000000000..e126628e2 --- /dev/null +++ b/content/en/docs/Developerguide/create-function.md @@ -0,0 +1,296 @@ +# CREATE FUNCTION + +## Function + +**CREATE FUNCTION** creates a function. + +## Precautions + +- If the parameters or return values of a function have precision, the precision is not checked. +- When creating a function, you are advised to explicitly specify the schemas of tables in the function definition. Otherwise, the function may fail to be executed. 
+- **current\_schema** and **search\_path** specified by **SET** during function creation are invalid. **search\_path** and **current\_schema** before and after function execution should be the same. +- If a function has output parameters, the **SELECT** statement uses the default values of the output parameters when calling the function. When the **CALL** statement calls the function, it requires that the output parameters must be specified. When the **CALL** statement calls an overloaded **PACKAGE** function, it can use the default values of the output parameters. For details, see examples in [CALL](call.md). +- Only the functions compatible with PostgreSQL or those with the **PACKAGE** attribute can be overloaded. After **REPLACE** is specified, a new function is created instead of replacing a function if the number of parameters, parameter type, or return value is different. +- You can use the **SELECT** statement to specify different parameters using identical functions, but cannot use the **CALL** statement to call identical functions without the **PACKAGE** attribute. +- When you create a function, you cannot insert other agg functions out of the avg function or other functions. +- By default, the permissions to execute new functions are granted to **PUBLIC**. For details, see [GRANT](grant.md). You can revoke the default execution permissions from **PUBLIC** and grant them to other users as needed. To avoid the time window during which new functions can be accessed by all users, create functions in transactions and set function execution permissions. + +## Syntax + +- Syntax \(compatible with PostgreSQL\) for creating a customized function: + + ``` + CREATE [ OR REPLACE ] FUNCTION function_name + ( [ { argname [ argmode ] argtype [ { DEFAULT | := | = } expression ]} [, ...] ] ) + [ RETURNS rettype [ DETERMINISTIC ] | RETURNS TABLE ( { column_name column_type } [, ...] )] + LANGUAGE lang_name + [ + {IMMUTABLE | STABLE | VOLATILE } + | {SHIPPABLE | NOT SHIPPABLE} + | WINDOW + | [ NOT ] LEAKPROOF + | {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } + | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER} + | {fenced | not fenced} + | {PACKAGE} + + | COST execution_cost + | ROWS result_rows + | SET configuration_parameter { {TO | =} value | FROM CURRENT }} + ][...] + { + AS 'definition' + } + + ``` + +- O syntax of creating a customized function: + + ``` + CREATE [ OR REPLACE ] FUNCTION function_name + ( [ { argname [ argmode ] argtype [ { DEFAULT | := | = } expression ] } [, ...] ] ) + RETURN rettype [ DETERMINISTIC ] + [ + {IMMUTABLE | STABLE | VOLATILE } + | {SHIPPABLE | NOT SHIPPABLE} + | {PACKAGE} + | {FENCED | NOT FENCED} + | [ NOT ] LEAKPROOF + | {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } + | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | + AUTHID DEFINER | AUTHID CURRENT_USER + } + | COST execution_cost + | ROWS result_rows + | SET configuration_parameter { {TO | =} value | FROM CURRENT + + ][...] + + { + IS | AS + } plsql_body + / + ``` + + +## Parameter Description + +- **function\_name** + + Specifies the name of the function to create \(optionally schema-qualified\). + + Value range: a string. It must comply with the naming convention. + +- **argname** + + Specifies the parameter name of the function. + + Value range: a string. It must comply with the naming convention. + +- **argmode** + + Specifies the parameter mode of the function. 
+ + Value range: **IN**, **OUT**, **INOUT**, and **VARIADIC**. The default value is **IN**. Only the parameter of the **OUT** mode can be followed by **VARIADIC**. The parameters of **OUT** and **INOUT** cannot be used in the function definition of **RETURNS TABLE**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**VARIADIC** specifies parameters of the array type. + +- **argtype** + + Specifies the data type of a function parameter. + +- **expression** + + Specifies the default expression of a parameter. + +- **rettype** + + Specifies the return data type. + + When there is **OUT** or **INOUT** parameter, the **RETURNS** clause can be omitted. If the clause exists, the result type of the clause must be the same as that of the output parameter. If there are multiple output parameters, the result type of the clause is **RECORD**. Otherwise, the result type of the clause is the same as that of a single output parameter. + + The **SETOF** modifier indicates that the function will return a set of items, rather than a single item. + +- **column\_name** + + Specifies the column name. + +- **column\_type** + + Specifies the column type. + +- **definition** + + Specifies a string constant defining a function. Its meaning depends on the language. It can be an internal function name, a path pointing to a target file, a SQL query, or text in a procedural language. + +- **LANGUAGE lang\_name** + + Specifies the name of the language that is used to implement the function. It can be **SQL**, **internal**, or the name of a customized process language. To ensure downward compatibility, the name can use single quotation marks. Contents in single quotation marks must be capitalized. + +- **WINDOW** + + Indicates that this function is a window function. The **WINDOW** attribute cannot be changed when replacing an existing function definition. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >For a customized window function, the value of **LANGUAGE** can only be **internal**, and the referenced internal function must be a window function. + +- **IMMUTABLE** + + Specifies that the function always returns the same result if the parameter values are the same. + +- **STABLE** + + Specifies that the function cannot modify the database, and that within a single table scan it will consistently return the same result for the same parameter value, but its result varies by SQL statements. + +- **VOLATILE** + + Specifies that the function value can change in a single table scan and no optimization is performed. + +- **PACKAGE** + + Specifies whether the function can be overloaded. PostgreSQL-style functions can be overloaded, and this parameter is designed for functions of other styles. + + - All PACKAGE and non-PACKAGE functions cannot be overloaded or replaced. + - PACKAGE functions do not support parameters of the VARIADIC type. + - The **PACKAGE** attribute of functions cannot be modified. + +- **LEAKPROOF** + + Specifies that the function has no side effects. **LEAKPROOF** can be set only by the system administrator. + +- **CALLED ON NULL INPUT** + + Declares that some parameters of the function can be invoked in normal mode if the parameter values are null. This parameter can be omitted. + +- **RETURNS NULL ON NULL INPUT** + + **STRICT** + + Specifies that the function always returns null whenever any of its parameters is null. If this parameter is specified, the function is not executed when there are null parameters; instead a null result is returned automatically. 
+ + **RETURNS NULL ON NULL INPUT** and **STRICT** have the same functions. + +- **EXTERNAL** + + The keyword **EXTERNAL** is allowed for SQL conformance, but it is optional since, unlike in SQL, this feature applies to all functions not only external ones. + +- **SECURITY INVOKER** + + **AUTHID CURRENT\_USER** + + Specifies that the function will be executed with the permissions of the user who invokes it. This parameter can be omitted. + + **SECURITY INVOKER** and **AUTHID CURRENT\_USER** have the same functions. + +- **SECURITY DEFINER** + + **AUTHID DEFINER** + + Specifies that the function will be executed with the permissions of the user who created it. + + **AUTHID DEFINER** and **SECURITY DEFINER** have the same functions. + +- **COST execution\_cost** + + Estimates the execution cost of a function. + + The unit of **execution\_cost** is **cpu\_operator\_cost**. + + Value range: a positive integer + +- **ROWS result\_rows** + + Estimates the number of rows returned by the function. This is only allowed when the function is declared to return a set. + + Value range: a positive number. The default value is **1000**. + +- **configuration\_parameter** + - **value** + + Sets a specified database session parameter to a specified value. If the value is **DEFAULT** or **RESET**, the default setting is used in the new session. **OFF** closes the setting. + + Value range: a string + + - DEFAULT + - OFF + - RESET + + Specifies the default value. + + - **from current** + + Uses the value of **configuration\_parameter** of the current session. + + +- **plsql\_body** + + Specifies the PL/SQL stored procedure body. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >When a user is created in the function body, the plaintext password is recorded in the log. You are not advised to do it. + + +## Examples + +``` +-- Define a function as SQL query. +postgres=# CREATE FUNCTION func_add_sql(integer, integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; + +-- Add an integer by parameter name using PL/pgSQL. +postgres=# CREATE OR REPLACE FUNCTION func_increment_plsql(i integer) RETURNS integer AS $$ + BEGIN + RETURN i + 1; + END; +$$ LANGUAGE plpgsql; + +-- Return the RECORD type. +CREATE OR REPLACE FUNCTION compute(i int, out result_1 bigint, out result_2 bigint) +returns SETOF RECORD +as $$ +begin + result_1 = i + 1; + result_2 = i * 10; +return next; +end; +$$language plpgsql; + +-- Return a record containing multiple output parameters. +postgres=# CREATE FUNCTION func_dup_sql(in int, out f1 int, out f2 text) + AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ + LANGUAGE SQL; + +postgres=# SELECT * FROM func_dup_sql(42); + +-- Compute the sum of two integers and return the result (if the input is null, the returned result is null). +postgres=# CREATE FUNCTION func_add_sql2(num1 integer, num2 integer) RETURN integer +AS +BEGIN +RETURN num1 + num2; +END; +/ +-- Alter the execution rule of function add to IMMUTABLE (that is, the same result is returned if the parameter remains unchanged). +postgres=# ALTER FUNCTION func_add_sql2(INTEGER, INTEGER) IMMUTABLE; + +-- Alter the name of function add to add_two_number. +postgres=# ALTER FUNCTION func_add_sql2(INTEGER, INTEGER) RENAME TO add_two_number; + +-- Change the owner of function add to omm. +postgres=# ALTER FUNCTION add_two_number(INTEGER, INTEGER) OWNER TO omm; + +-- Delete the function. 
+postgres=# DROP FUNCTION add_two_number; +postgres=# DROP FUNCTION func_increment_sql; +postgres=# DROP FUNCTION func_dup_sql; +postgres=# DROP FUNCTION func_increment_plsql; +postgres=# DROP FUNCTION func_add_sql; +``` + +## Helpful Links + +[ALTER FUNCTION](alter-function.md) and [DROP FUNCTION](drop-function.md) + diff --git a/content/en/docs/Developerguide/create-group.md b/content/en/docs/Developerguide/create-group.md new file mode 100644 index 000000000..5e56391ce --- /dev/null +++ b/content/en/docs/Developerguide/create-group.md @@ -0,0 +1,56 @@ +# CREATE GROUP + +## Function + +**CREATE GROUP** creates a user group. + +## Precautions + +**CREATE GROUP** is an alias for **CREATE ROLE**, and it is not a standard SQL syntax and not recommended. Users can use **CREATE ROLE** directly. + +## Syntax + +``` +CREATE GROUP group_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE }; +``` + +The syntax of the **option** clause is as follows: + +``` +{SYSADMIN | NOSYSADMIN} + | {AUDITADMIN | NOAUDITADMIN} + | {CREATEDB | NOCREATEDB} + | {USEFT | NOUSEFT} + | {CREATEROLE | NOCREATEROLE} + | {INHERIT | NOINHERIT} + | {LOGIN | NOLOGIN} + | {REPLICATION | NOREPLICATION} + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | CONNECTION LIMIT connlimit + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | PERM SPACE 'spacelimit' + | TEMP SPACE 'tmpspacelimit' + | SPILL SPACE 'spillspacelimit' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN rol e_name [, ...] + | USER role_name [, ...] + | SYSID uid + | DEFAULT TABLESPACE tablespace_name + | PROFILE DEFAULT + | PROFILE profile_name + | PGUSER +``` + +## Parameter Description + +See [Parameter Description](create-role.md#en-us_topic_0237122112_en-us_topic_0059778189_s5a43ec5742a742089e2c302063de7fe4) in **CREATE ROLE**. + +## Helpful Links + +[ALTER GROUP](alter-group.md), [DROP GROUP](drop-group.md), and [CREATE ROLE](create-role.md) + diff --git a/content/en/docs/Developerguide/create-index.md b/content/en/docs/Developerguide/create-index.md new file mode 100644 index 000000000..af2967dfa --- /dev/null +++ b/content/en/docs/Developerguide/create-index.md @@ -0,0 +1,314 @@ +# CREATE INDEX + +## Function + +**CREATE INDEX** creates an index in a specified table. + +Indexes are primarily used to enhance database performance \(though inappropriate use can result in database performance deterioration\). You are advised to create indexes on: + +- Columns that are often queried +- Join conditions. For a query on joined columns, you are advised to create a composite index on the columns, for example, **select \* from t1 join t2 on t1.a=t2.a and t1.b=t2.b**. You can create a composite index on columns **a** and **b** in table **t1**. +- Columns having filter criteria \(especially scope criteria\) of a **where** clause +- Columns that appear after **order by**, **group by**, and **distinct** + +Partitioned tables do not support concurrent index creation, partial index creation, and **NULL FIRST**. + +## Precautions + +- Indexes consume storage and computing resources. Creating too many indexes has negative impact on database performance \(especially the performance of data import. Therefore, you are advised to import the data before creating indexes\). Therefore, create indexes only when they are necessary. 
+- All functions and operators used in an index definition must be immutable, that is, their results must depend only on their parameters and never on any outside influence \(such as the contents of another table or the current time\). This restriction ensures that the behavior of the index is well-defined. To use a customized function in an index expression or **WHERE** clause, remember to mark the function **immutable** when you create it. +- A unique index created on a partitioned table must include a partitioned column and all the partition keys. +- Column-store tables support B-tree and psort indexes. If the two indexes are used, you cannot create expression, partial, and unique indexes. +- Column-store tables support GIN indexes, rather than partial indexes and unique indexes. If GIN indexes are used, you can create expression indexes. However, an expression in this situation cannot contain empty splitters, empty columns, or multiple columns. + +## Syntax + +- Create an index on a table. + + ``` + CREATE [ UNIQUE ] INDEX [ [schemaname.]index_name ] ON table_name [ USING method ] + ({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] ) + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ TABLESPACE tablespace_name ] + [ WHERE predicate ]; + ``` + +- Create an index on a partitioned table. + + ``` + CREATE [ UNIQUE ] INDEX [ [schemaname.]index_name ] ON table_name [ USING method ] + ( {{ column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] }[, ...] ) + LOCAL [ ( { PARTITION index_partition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] + [ WITH ( { storage_parameter = value } [, ...] ) ] + [ TABLESPACE tablespace_name ]; + ``` + + +## Parameter Description + +- **UNIQUE** + + Creates a unique index. In this way, the system checks whether new values are unique in the index column. Attempts to insert or update data which would result in duplicate entries will generate an error. + + Currently, only B-tree supports **UNIQUE** indexes. + +- **schema\_name** + + Specifies the schema name. + + Value range: an existing schema name + +- **index\_name** + + Specifies the name of the index to create. No schema name can be included here; the index is always created in the same schema as its parent table. + + Value range: a string. It must comply with the naming convention. + +- **table\_name** + + Specifies the name of the table to be indexed \(optionally schema-qualified\). + + Value range: an existing table name + +- **USING method** + + Specifies the name of the index method to be used. + + Value range: + + - **btree**: B-tree indexes store key values of data in a B+ tree structure. This structure helps users to quickly search for indexes. B-tree supports comparison queries with a scope specified. + - **gin**: GIN indexes are reverse indexes and can process values that contain multiple keys \(for example, arrays\). + - **gist**: GiST indexes are suitable for the set data type and multidimensional data types, such as geometric and geographic data types. + - **Psort**: psort index. It is used to perform partial sort on column-store tables. + + Row-store tables support the following index types: **btree** \(default\), **gin**, and **gist**. Column-store tables support the following index types: **Psort** \(default\), **btree**, and **gin**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >Column-store tables support GIN indexes only for the tsvector type. 
That is, the input parameter for creating a column-store GIN index must be the return value of the **to\_tsvector** function. This method is commonly used for GIN indexes. + +- **column\_name** + + Specifies the name of the column on which an index is to be created. + + Multiple columns can be specified if the index method supports multi-column indexes. A maximum of 32 columns can be specified. + +- **expression** + + Specifies an expression based on one or more columns of the table. The expression usually must be written with surrounding parentheses, as shown in the syntax. However, the parentheses can be omitted if the expression has the form of a function call. + + Expression can be used to obtain fast access to data based on some transformation of the basic data. For example, an index computed on **upper\(col\)** would allow the clause **WHERE upper\(col\) = 'JIM'** to use an index. + + If an expression contains **IS NULL**, the index for this expression is invalid. In this case, you are advised to create a partial index. + +- **COLLATE collation** + + Assigns a collation to the column \(which must be of a collatable data type\). If no collation is specified, the default collation is used. + +- **opclass** + + Specifies the name of an operator class. An operator class can be specified for each column of an index. The operator class identifies the operators to be used by the index for that column. For example, a B-tree index on the type int4 would use the **int4\_ops** class; this operator class includes comparison functions for values of type int4. In practice, the default operator class for the column's data type is sufficient. The operator class applies to data with multiple sorts. For example, users might want to sort a complex-number data type either by absolute value or by real part. They could do this by defining two operator classes for the data type and then selecting the proper class when making an index. + +- **ASC** + + Specifies an ascending \(default\) sort order. + +- **DESC** + + Specifies a descending sort order. + +- **NULLS FIRST** + + Specifies that null values appear before non-null values in the sort ordering. This is the default when **DESC** is specified. + +- **NULLS LAST** + + Specifies that null values appear after non-null values in the sort ordering. This is the default when **DESC** is not specified. + +- **WITH \( \{storage\_parameter = value\} \[, ... \] \)** + + Specifies the storage parameter used for an index. + + Value range: + + Only index GIN supports parameters **FASTUPDATE** and **GIN\_PENDING\_LIST\_LIMIT**. Indexes other than GIN and psort support the **FILLFACTOR** parameter. + + - FILLFACTOR + + The fill factor of an index is a percentage from 10 to 100. + + Value range: 10–100 + + - FASTUPDATE + + Specifies whether fast update is enabled for the GIN index. + + Value range: : **ON** and **OFF** + + Default value: **ON** + + - GIN\_PENDING\_LIST\_LIMIT + + Specifies the maximum capacity of the pending list of the GIN index when fast update is enabled for the GIN index. + + Value range: 64–_INT\_MAX_. The unit is KB. + + Default value: The default value of **gin\_pending\_list\_limit** depends on **gin\_pending\_list\_limit** specified in GUC parameters. By default, the value is **4**. + + +- **TABLESPACE tablespace\_name** + + Specifies the tablespace for an index. If no tablespace is specified, the default tablespace is used. + + Value range: an existing table name + +- **WHERE predicate** + + Creates a partial index. 
A partial index is an index that contains entries for only a portion of a table, usually a portion that is more useful for indexing than the rest of the table. For example, if you have a table that contains both billed and unbilled orders where the unbilled orders take up a small fraction of the total table and yet that is an often used portion, you can improve performance by creating an index on just that portion. In addition, **WHERE** with **UNIQUE** can be used to enforce uniqueness over a subset for a table. + + Value range: The predicate expression can only refer to columns of the underlying table, but it can use all columns, not just the ones being indexed. Currently, subqueries and aggregate expressions are forbidden in **WHERE**. + +- **PARTITION index\_partition\_name** + + Specifies the name of an index partition. + + Value range: a string. It must comply with the naming convention. + +- **TABLESPACE index\_partition\_tablespace** + + Specifies the tablespace of an index partition. + + Value range: If this parameter is not specified, the value of **index\_tablespace** is used. + + +## Examples + +``` +-- Create the tpcds.ship_mode_t1 table. +postgres=# create schema tpcds; +postgres=# CREATE TABLE tpcds.ship_mode_t1 +( + SM_SHIP_MODE_SK INTEGER NOT NULL, + SM_SHIP_MODE_ID CHAR(16) NOT NULL, + SM_TYPE CHAR(30) , + SM_CODE CHAR(10) , + SM_CARRIER CHAR(20) , + SM_CONTRACT CHAR(20) +) +; + +-- Create a common index on the SM_SHIP_MODE_SK column in the tpcds.ship_mode_t1 table. +postgres=# CREATE UNIQUE INDEX ds_ship_mode_t1_index1 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK); + +-- Create a B-tree index on the SM_SHIP_MODE_SK column in the tpcds.ship_mode_t1 table. +postgres=# CREATE INDEX ds_ship_mode_t1_index4 ON tpcds.ship_mode_t1 USING btree(SM_SHIP_MODE_SK); + +-- Create an expression index on the SM_CODE column in the tpcds.ship_mode_t1 table: +postgres=# CREATE INDEX ds_ship_mode_t1_index2 ON tpcds.ship_mode_t1(SUBSTR(SM_CODE,1 ,4)); + +-- Create a partial index on the SM_SHIP_MODE_SK column where SM_SHIP_MODE_SK is greater than 10 in the tpcds.ship_mode_t1 table. +postgres=# CREATE UNIQUE INDEX ds_ship_mode_t1_index3 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10; + +-- Rename an existing index. +postgres=# ALTER INDEX tpcds.ds_ship_mode_t1_index1 RENAME TO ds_ship_mode_t1_index5; + +-- Set the index as unusable. +postgres=# ALTER INDEX tpcds.ds_ship_mode_t1_index2 UNUSABLE; + +-- Rebuild an index. +postgres=# ALTER INDEX tpcds.ds_ship_mode_t1_index2 REBUILD; + +-- Delete an existing index. +postgres=# DROP INDEX tpcds.ds_ship_mode_t1_index2; + +-- Delete the table. +postgres=# DROP TABLE tpcds.ship_mode_t1; + +-- Create a tablespace. +postgres=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; +postgres=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; +postgres=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; +postgres=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; +-- Create the tpcds.customer_address_p1 table. 
+postgres=# CREATE TABLE tpcds.customer_address_p1 +( + CA_ADDRESS_SK INTEGER NOT NULL, + CA_ADDRESS_ID CHAR(16) NOT NULL, + CA_STREET_NUMBER CHAR(10) , + CA_STREET_NAME VARCHAR(60) , + CA_STREET_TYPE CHAR(15) , + CA_SUITE_NUMBER CHAR(10) , + CA_CITY VARCHAR(60) , + CA_COUNTY VARCHAR(30) , + CA_STATE CHAR(2) , + CA_ZIP CHAR(10) , + CA_COUNTRY VARCHAR(20) , + CA_GMT_OFFSET DECIMAL(5,2) , + CA_LOCATION_TYPE CHAR(20) +) +TABLESPACE example1 +PARTITION BY RANGE(CA_ADDRESS_SK) +( + PARTITION p1 VALUES LESS THAN (3000), + PARTITION p2 VALUES LESS THAN (5000) TABLESPACE example1, + PARTITION p3 VALUES LESS THAN (MAXVALUE) TABLESPACE example2 +) +ENABLE ROW MOVEMENT; +-- Create the partitioned table index ds_customer_address_p1_index1 without specifying the index partition name. +postgres=# CREATE INDEX ds_customer_address_p1_index1 ON tpcds.customer_address_p1(CA_ADDRESS_SK) LOCAL; +-- Create the partitioned table index ds_customer_address_p1_index2 with the name of the index partition specified. +postgres=# CREATE INDEX ds_customer_address_p1_index2 ON tpcds.customer_address_p1(CA_ADDRESS_SK) LOCAL +( + PARTITION CA_ADDRESS_SK_index1, + PARTITION CA_ADDRESS_SK_index2 TABLESPACE example3, + PARTITION CA_ADDRESS_SK_index3 TABLESPACE example4 +) +TABLESPACE example2; + +-- Change the tablespace of the partitioned table index CA_ADDRESS_SK_index2 to example1. +postgres=# ALTER INDEX tpcds.ds_customer_address_p1_index2 MOVE PARTITION CA_ADDRESS_SK_index2 TABLESPACE example1; + +-- Change the tablespace of the partitioned table index CA_ADDRESS_SK_index3 to example2. +postgres=# ALTER INDEX tpcds.ds_customer_address_p1_index2 MOVE PARTITION CA_ADDRESS_SK_index3 TABLESPACE example2; + +-- Rename a partitioned table index. +postgres=# ALTER INDEX tpcds.ds_customer_address_p1_index2 RENAME PARTITION CA_ADDRESS_SK_index1 TO CA_ADDRESS_SK_index4; + +-- Delete the created indexes and the partitioned table. +postgres=# DROP INDEX tpcds.ds_customer_address_p1_index1; +postgres=# DROP INDEX tpcds.ds_customer_address_p1_index2; +postgres=# DROP TABLE tpcds.customer_address_p1; +-- Delete the tablespace. +postgres=# DROP TABLESPACE example1; +postgres=# DROP TABLESPACE example2; +postgres=# DROP TABLESPACE example3; +postgres=# DROP TABLESPACE example4; + +-- Create a column-store table and its GIN index: +postgres=# create table cgin_create_test(a int, b text) with (orientation = column); +CREATE TABLE +postgres=# create index cgin_test on cgin_create_test using gin(to_tsvector('ngram', b)); +CREATE INDEX +``` + +## Helpful Links + +[ALTER INDEX](alter-index.md) and [DROP INDEX](drop-index.md) + +## Suggestions + +- create index + + You are advised to create indexes on: + + - Columns that are often queried + - Join conditions. For a query on joined columns, you are advised to create a composite index on the columns, for example, **select \* from t1 join t2 on t1.a=t2.a and t1.b=t2.b**. You can create a composite index on columns **a** and **b** in table **t1**. + - Columns having filter criteria \(especially scope criteria\) of a **where** clause + - Columns that appear after **order by**, **group by**, and **distinct** + + Constraints: + + - Partitioned tables do not support partial indexes or the **NULL FIRST** feature. + + - A unique index created on a partitioned table must include a partitioned column and all the partition keys. 
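+
+    The following minimal sketch illustrates the last constraint. The table **tpcds.customer_address_p2** and its columns are hypothetical and are assumed to be range partitioned by **CA_ADDRESS_SK**:
+
+    ```
+    -- The unique index contains the partition key CA_ADDRESS_SK, so it should succeed.
+    postgres=# CREATE UNIQUE INDEX ds_customer_address_p2_uindex1 ON tpcds.customer_address_p2(CA_ADDRESS_SK, CA_ADDRESS_ID) LOCAL;
+
+    -- A unique index that omits the partition key is expected to be rejected.
+    postgres=# CREATE UNIQUE INDEX ds_customer_address_p2_uindex2 ON tpcds.customer_address_p2(CA_ADDRESS_ID) LOCAL;
+    ```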
+ + diff --git a/content/en/docs/Developerguide/create-procedure.md b/content/en/docs/Developerguide/create-procedure.md new file mode 100644 index 000000000..d7f0d15b1 --- /dev/null +++ b/content/en/docs/Developerguide/create-procedure.md @@ -0,0 +1,95 @@ +# CREATE PROCEDURE + +## Function + +**CREATE PROCEDURE** creates a stored procedure. + +## Precautions + +- If the parameters or return values of a stored procedure have precision, the precision is not checked. +- When creating a stored procedure, you are advised to explicitly specify the schemas of all operations on table objects in the stored procedure definition. Otherwise, the stored procedure may fail to be executed. +- **current\_schema** and **search\_path** specified by **SET** during stored procedure creation are invalid. **search\_path** and **current\_schema** before and after function execution should be the same. +- If a stored procedure has output parameters, the **SELECT** statement uses the default values of the output parameters when calling the procedure. When the **CALL** statement calls the stored procedure or a non-overloaded function, output parameters must be specified. When the **CALL** statement calls an overloaded **PACKAGE** function, it can use the default values of the output parameters. For details, see examples in [CALL](call.md). +- A stored procedure with the **PACKAGE** attribute can use overloaded functions. +- When you create a procedure, you cannot insert aggregate functions or other functions out of the average function. + +## Syntax + +``` +postgres=# CREATE [ OR REPLACE ] PROCEDURE procedure_name + [ ( {[ argmode ] [ argname ] argtype [ { DEFAULT | := | = } expression ]}[,...]) ] + [ + { IMMUTABLE | STABLE | VOLATILE } + | { SHIPPABLE | NOT SHIPPABLE } + | {PACKAGE} + | [ NOT ] LEAKPROOF + | { CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } + | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER} + | COST execution_cost + | ROWS result_rows + | SET configuration_parameter { [ TO | = ] value | FROM CURRENT } + ][ ... ] + { IS | AS } +plsql_body +/ +``` + +## Parameter Description + +- **OR REPLACE** + + Replaces the original definition when two stored procedures are with the same name. + +- **procedure\_name** + + Specifies the name of the stored procedure that is created \(optionally with schema names\). + + Value range: a string. It must comply with the naming convention. + +- **argmode** + + Specifies the mode of an argument. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >**VARIADIC** specifies parameters of the array type. + + Value range: **IN**, **OUT**, **INOUT**, and **VARIADIC**. The default value is **IN**. Only the parameter of the **OUT** mode can be followed by **VARIADIC**. The parameters of **OUT** and **INOUT** cannot be used in procedure definition of **RETURNS TABLE**. + +- **argname** + + Specifies the argument name. + + Value range: a string. It must comply with the naming convention. + +- **argtype** + + Specifies the type of an argument. + + Value range: a valid data type + +- **IMMUTABLE, STABLE,**... + + Specifies a constraint. The function of each parameter is similar to that of **CREATE FUNCTION**. For details, see [CREATE FUNCTION](create-function.md). + +- **plsql\_body** + + Specifies the PL/SQL stored procedure body. 
+ + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >When you create a user, or perform other operations requiring password input in a stored procedure, the system catalog and CSV log record the password in plaintext. Therefore, you are advised not to perform such operations in the stored procedure. + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>No specific order is applied to **argument\_name** and **argmode**. The following order is advised: **argument\_name**, **argmode**, and **argument\_type**. + +## Helpful Links + +[DROP PROCEDURE](drop-procedure.md) + +## Suggestions + +- analyse | analyze + - Do not run **ANALYZE** in a transaction or anonymous block. + - Do not run **ANALYZE** in a function or stored procedure. + + diff --git a/content/en/docs/Developerguide/create-role.md b/content/en/docs/Developerguide/create-role.md new file mode 100644 index 000000000..2b1eab5c0 --- /dev/null +++ b/content/en/docs/Developerguide/create-role.md @@ -0,0 +1,236 @@ +# CREATE ROLE + +## Function + +**CREATE ROLE** is used to create a role. + +A role is an entity that owns database objects and permissions. In different environments, a role can be considered a user, a group, or both. + +## Precautions + +- **CREATE ROLE** adds a role to a database. The role does not have the **LOGIN** permission. +- Only the user who has the **CREATE ROLE** permission or a system administrator is allowed to create roles. + +## Syntax + +``` +CREATE ROLE role_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE }; +``` + +The syntax of role information configuration clause **option** is as follows: + +``` +{SYSADMIN | NOSYSADMIN} + | {AUDITADMIN | NOAUDITADMIN} + | {CREATEDB | NOCREATEDB} + | {USEFT | NOUSEFT} + | {CREATEROLE | NOCREATEROLE} + | {INHERIT | NOINHERIT} + | {LOGIN | NOLOGIN} + | {REPLICATION | NOREPLICATION} + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | CONNECTION LIMIT connlimit + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | PERM SPACE 'spacelimit' + | TEMP SPACE 'tmpspacelimit' + | SPILL SPACE 'spillspacelimit' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN rol e_name [, ...] + | USER role_name [, ...] + | SYSID uid + | DEFAULT TABLESPACE tablespace_name + | PROFILE DEFAULT + | PROFILE profile_name + | PGUSER +``` + +## Parameter Description + +- **role\_name** + + Specifies the name of a role. + + Value range: a string. It must comply with the naming convention rule, and can contain a maximum of 63 characters. If the value contains more than 63 characters, the database truncates it and retains the first 63 characters as the role name. When a role is created, the database will display a message. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The identifier must be letters, underscores \(\_\), digits \(0-9\), or dollar signs \($\) and must start with a letter \(a-z\) or underscore \(\_\). + +- **password** + + Specifies the login password. + + The new password must: + + - Contain at least eight characters. This is the default length. + - Differ from the username or the username spelled backward. + - Contain at least three types of the following four types of characters: uppercase characters \(A to Z\), lowercase characters \(a to z\), digits \(0 to 9\), and special characters, including: \~!@\#$%^&\*\(\)-\_=+\\|\[\{\}\];:,<.\>/? 
+ + Value range: a string + +- DISABLE + + By default, you can change your password unless it is disabled. To disable the password of a user, use this parameter. After the password of a user is disabled, the password will be deleted from the system. The user can connect to the database only through external authentication, for example, Kerberos authentication. Only administrators can enable or disable a password. Common users cannot disable the password of an initial user. To enable a password, run **ALTER USER** and specify the password. + +- **ENCRYPTED | UNENCRYPTED** + + Controls whether the password is stored encrypted in the system catalogs. \(If neither is specified, the default behavior is determined by the configuration parameter **password\_encryption**.\) According to product security requirement, the password must be stored encrypted. Therefore, **UNENCRYPTED** is forbidden in openGauss. If the password string has already been encrypted in the SHA256 format, it is stored encrypted as it was, regardless of whether **ENCRYPTED** or **UNENCRYPTED** is specified \(since the system cannot decrypt the specified encrypted password string\). This allows reloading of encrypted passwords during dump/restore. + +- **SYSADMIN | NOSYSADMIN** + + Determines whether a new role is a system administrator. Roles having the **SYSADMIN** attribute have the highest permission. + + Value range: If not specified, **NOSYSADMIN** is the default. + +- **AUDITADMIN | NOAUDITADMIN** + + Determines whether a role has the audit and management attributes. + + If not specified, **NOAUDITADMIN** is the default. + +- **CREATEDB | NOCREATEDB** + + Determines a role's permission to create databases. + + A new role does not have the permission to create databases. + + Value range: If not specified, **NOCREATEDB** is the default. + +- **USEFT | NOUSEFT** + + This parameter is reserved and not used in this version. + +- **CREATEROLE | NOCREATEROLE** + + Determines whether a role will be permitted to create new roles \(that is, execute **CREATE ROLE** and **CREATE USER**\). A role with the **CREATEROLE** permission can also modify and delete other roles. + + Value range: If not specified, **NOCREATEROLE** is the default. + +- **INHERIT | NOINHERIT** + + Determines whether a role "inherits" the permissions of roles in the same group. You are not advised to set this parameter. + +- **LOGIN | NOLOGIN** + + Determines whether a role is allowed to log in to a database. A role having the **LOGIN** attribute can be considered as a user. + + Value range: If not specified, **NOLOGIN** is the default. + +- **REPLICATION | NOREPLICATION** + + Determines whether a role is allowed to initiate streaming replication or put the system in and out of backup mode. A role having the **REPLICATION** attribute is specific to replication. + + If not specified, **NOREPLICATION** is the default. + +- **INDEPENDENT | NOINDEPENDENT** + + Defines private, independent roles. For a role with the **INDEPENDENT** attribute, administrators' permissions to control and access this role are separated. The rules are as follows: + + - Administrators have no permission to add, delete, query, modify, copy, or authorize the corresponding table objects without the authorization from the **INDEPENDENT** role. + - System administrators and security administrators with the **CREATEROLE** attribute have no permission to modify the inheritance relationship of the **INDEPENDENT** role without the authorization of the **INDEPENDENT** role. 
+ - System administrators have no permission to modify the owner of the table objects for the **INDEPENDENT** role. + - System administrators and security administrators with the **CREATEROLE** attribute have no permission to remove the **INDEPENDENT** attribute of the **INDEPENDENT** role. + - System administrators and security administrators with the **CREATEROLE** attribute have no permission to change the database password of the **INDEPENDENT** role. The **INDEPENDENT** role must manage its own password. If the password is lost, it cannot be reset. + - The **SYSADMIN** attribute of a user cannot be changed to the **INDEPENDENT** attribute. + +- **CONNECTION LIMIT** + + Specifies how many concurrent connections the role can make. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- The system administrator is not restricted by this parameter. + >- The number of concurrent connections of each primary database node is calculated separately \(which is the value of **connlimit**\). The number of all connections of openGauss = Value of **connlimit** x Number of normal primary database nodes. + + Value range: an integer greater than or equal to -1. The default value is **-1**, which means unlimited. + +- **VALID BEGIN** + + Sets a date and time when the role's password takes effect. If this clause is omitted, the password takes effect immediately. + +- **VALID UNTIL** + + Sets a date and time after which the role's password is no longer valid. If this clause is omitted, the password will be valid for all time. + +- **RESOURCE POOL** + + Sets the name of resource pool used by the role. The name belongs to the system catalog **pg\_resource\_pool**. + +- **PERM SPACE** + + Sets the space available for a user. + +- **TEMP SPACE** + + Sets the space allocated to the temporary table of a user. + +- **SPILL SPACE** + + Sets the operator disk flushing space of a user. + +- **IN ROLE** + + Lists one or more existing roles to which the new role will be immediately added as a new member. You are not advised to set this parameter. + +- **IN GROUP** + + Specifies an obsolete spelling of **IN ROLE**. You are not advised to set this parameter. + +- **ROLE** + + Lists one or more existing roles which are automatically added as members of the new role. + +- **ADMIN** + + Similar to **ROLE**. However, **ADMIN** grants permissions of new roles to other roles. + +- **USER** + + Specifies an obsolete spelling of the **ROLE** clause. + +- **SYSID** + + The **SYSID** clause is ignored. + +- **DEFAULT TABLESPACE** + + The **DEFAULT TABLESPACE** clause is ignored. + +- **PROFILE** + + The **PROFILE** clause is ignored. + +- **PGUSER** + + In the current version, this attribute is reserved only for forward compatibility. + + +## Example: + +``` +-- Create role manager whose password is Bigdata123@. +postgres=# CREATE ROLE manager IDENTIFIED BY 'Bigdata@123'; + +-- Create a role with its validity from January 1, 2015 to January 1, 2026. +postgres=# CREATE ROLE miriam WITH LOGIN PASSWORD 'Bigdata@123' VALID BEGIN '2015-01-01' VALID UNTIL '2026-01-01'; + +-- Change the password of role manager to abcd@123. +postgres=# ALTER ROLE manager IDENTIFIED BY 'abcd@123' REPLACE 'Bigdata@123'; + +-- Change role manager to the system administrator. +postgres=# ALTER ROLE manager SYSADMIN; + +-- Delete role manager. +postgres=# DROP ROLE manager; + +-- Delete role miriam. 
+postgres=# DROP ROLE miriam; +``` + +## Helpful Links + +[SET ROLE](set-role.md), [ALTER ROLE](alter-role.md), [DROP ROLE](drop-role.md), and [GRANT](grant.md) + diff --git a/content/en/docs/Developerguide/create-row-level-security-policy.md b/content/en/docs/Developerguide/create-row-level-security-policy.md new file mode 100644 index 000000000..5a4347601 --- /dev/null +++ b/content/en/docs/Developerguide/create-row-level-security-policy.md @@ -0,0 +1,224 @@ +# CREATE ROW LEVEL SECURITY POLICY + +## Function + +**CREATE ROW LEVEL SECURITY POLICY** creates a row-level access control policy for a table. + +The policy takes effect only after row-level access control is enabled \(by running **ALTER TABLE... ENABLE ROW LEVEL SECURITY**\). Otherwise, this statement does not take effect. + +Currently, row-level access control affects the read \(**SELECT**, **UPDATE**, **DELETE**\) of data tables and does not affect the write \(**INSERT** and **MERGE INTO**\) of data tables. The table owner or system administrators can create an expression in the **USING** clause. When the client reads the data table, the database server combines the expressions that meet the condition and applies it to the execution plan in the statement rewriting phase of a query. For each tuple in a data table, if the expression returns **TRUE**, the tuple is visible to the current user; if the expression returns **FALSE** or **NULL**, the tuple is invisible to the current user. + +A row-level access control policy name is specific to a table. A data table cannot have row-level access control policies with the same name. Different data tables can have the same row-level access control policy. + +Row-level access control policies can be applied to specified operations \(**SELECT**, **UPDATE**, **DELETE**, and **ALL**\). **ALL** indicates that **SELECT**, **UPDATE**, and **DELETE** will be affected. For a new row-level access control policy, the default value **ALL** will be used if you do not specify the operations that will be affected. + +Row-level access control policies can be applied to a specified user \(role\) or to all users \(**PUBLIC**\). For a new row-level access control policy, the default value **PUBLIC** will be used if you do not specify the user that will be affected. + +## Precautions + +- Row-level access control policies can be defined for row-store tables, row-store partitioned tables, column-store tables, column-store partitioned tables, replication tables, unlogged tables, and hash tables. +- Row-level access control policies cannot be defined for foreign tables and temporary tables. +- Row-level access control policies cannot be defined for views. +- A maximum of 100 row-level access control policies can be defined for a table. +- System administrators are not affected by row-level access control policies and can view all data in a table. +- Tables queried by using SQL statements, views, functions, and stored procedures are affected by row-level access control policies. + +## Syntax + +``` +CREATE [ ROW LEVEL SECURITY ] POLICY policy_name ON table_name + [ AS { PERMISSIVE | RESTRICTIVE } ] + [ FOR { ALL | SELECT | INSERT | UPDATE | DELETE } ] + [ TO { role_name | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ] + USING ( using_expression ) +``` + +## Parameter Description + +- **policy\_name** + + Specifies the name of a row-level access control policy to be created. The names of row-level access control policies for a table must be unique. 
+ +- **table\_name** + + Specifies the name of a table to which a row-level access control policy is applied. + +- **command** + + Specifies the SQL operations affected by a row-level access control policy, including **ALL**, **SELECT**, **UPDATE**, and **DELETE**. If this parameter is not specified, the default value **ALL** will be used, covering **SELECT**, **UPDATE**, and **DELETE**. + + If _command_ is set to **SELECT**, only tuple data that meets the condition \(the return value of _using\_expression_ is **TRUE**\) can be queried. The operations that are affected include **SELECT**, **UPDATE.... RETURNING**, and **DELETE... RETURNING**. + + If _command_ is set to **UPDATE**, only tuple data that meets the condition \(the return value of _using\_expression_ is **TRUE**\) can be updated. The operations that are affected include **UPDATE**, **UPDATE ... RETURNING**, and **SELECT ... FOR UPDATE/SHARE**. + + If _command_ is set to **DELETE**, only tuple data that meets the condition \(the return value of _using\_expression_ is **TRUE**\) can be deleted. The operations that are affected include **DELETE** and **DELETE ... RETURNING**. + + The following table describes the relationship between row-level access control policies and SQL statements. + + **Table 1** Relationship between row-level access control policies and SQL statements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+    | Command                 | SELECT/ALL policy | UPDATE/ALL policy | DELETE/ALL policy |
+    | ----------------------- | ----------------- | ----------------- | ----------------- |
+    | SELECT                  | Existing row      | No                | No                |
+    | SELECT FOR UPDATE/SHARE | Existing row      | Existing row      | No                |
+    | UPDATE                  | No                | Existing row      | No                |
+    | UPDATE RETURNING        | Existing row      | Existing row      | No                |
+    | DELETE                  | No                | No                | Existing row      |
+    | DELETE RETURNING        | Existing row      | No                | Existing row      |
+ +- **role\_name** + + Specifies database users affected by a row-level access control policy. + + If this parameter is not specified, the default value **PUBLIC** will be used, indicating that all database users will be affected. You can specify multiple affected database users. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >System administrators are not affected by row access control. + + +- **using\_expression** + + Specifies an expression defined for a row-level access control policy \(return type: boolean\). + + The expression cannot contain aggregate functions or window functions. In the statement rewriting phase of a query, if row-level access control for a data table is enabled, the expressions that meet the specified conditions will be added to the plan tree. The expression is calculated for each tuple in the data table. For **SELECT**, **UPDATE**, and **DELETE**, row data is visible to the current user only when the return value of the expression is **TRUE**. If the expression returns **FALSE**, the tuple is invisible to the current user. In this case, the user cannot view the tuple through the **SELECT** statement, update the tuple through the **UPDATE** statement, or delete the tuple through the **DELETE** statement. + + +## Example: + +``` +-- Create user alice. +postgres=# CREATE ROLE alice PASSWORD 'Gauss@123'; + +-- Create user bob. +postgres=# CREATE ROLE bob PASSWORD 'Gauss@123'; + +-- Create the data table all_data. +postgres=# CREATE TABLE all_data(id int, role varchar(100), data varchar(100)); + +--Insert data into the data table. +postgres=# INSERT INTO all_data VALUES(1, 'alice', 'alice data'); +postgres=# INSERT INTO all_data VALUES(2, 'bob', 'bob data'); +postgres=# INSERT INTO all_data VALUES(3, 'peter', 'peter data'); + +-- Grant the read permission on the all_data table to users alice and bob. +postgres=# GRANT SELECT ON all_data TO alice, bob; + +--Enable row-level access control. +postgres=# ALTER TABLE all_data ENABLE ROW LEVEL SECURITY; + +--Create a row-level access control policy to specify that the current user can view only their own data. +postgres=# CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER); + +-- View information about the all_data table. +postgres=# \d+ all_data + Table "public.all_data" + Column | Type | Modifiers | Storage | Stats target | Description +--------+------------------------+-----------+----------+--------------+------------- + id | integer | | plain | | + role | character varying(100) | | extended | | + data | character varying(100) | | extended | | +Row Level Security Policies: + POLICY "all_data_rls" + USING (((role)::name = "current_user"())) +Has OIDs: no +Location Nodes: ALL DATANODES +Options: orientation=row, compression=no, enable_rowsecurity=true + +-- Run SELECT. +postgres=# SELECT * FROM all_data; + id | role | data +----+-------+------------ + 1 | alice | alice data + 2 | bob | bob data + 3 | peter | peter data +(3 rows) + +postgres=# EXPLAIN(COSTS OFF) SELECT * FROM all_data; + QUERY PLAN +---------------------------- + Streaming (type: GATHER) + Node/s: All dbnodes + -> Seq Scan on all_data +(3 rows) + +-- Switch to user alice and run SELECT. 
+postgres=# SELECT * FROM all_data; + id | role | data +----+-------+------------ + 1 | alice | alice data +(1 row) + +postgres=# EXPLAIN(COSTS OFF) SELECT * FROM all_data; + QUERY PLAN +---------------------------------------------------------------- + Streaming (type: GATHER) + Node/s: All dbnodes + -> Seq Scan on all_data + Filter: ((role)::name = 'alice'::name) + Notice: This query is influenced by row level security feature +(5 rows) +``` + +## Helpful Links + +[DROP ROW LEVEL SECURITY POLICY](drop-row-level-security-policy.md) + diff --git a/content/en/docs/Developerguide/create-schema.md b/content/en/docs/Developerguide/create-schema.md new file mode 100644 index 000000000..2e3b26c3c --- /dev/null +++ b/content/en/docs/Developerguide/create-schema.md @@ -0,0 +1,81 @@ +# CREATE SCHEMA + +## Function + +**CREATE SCHEMA** creates a schema. + +Named objects are accessed either by "qualifying" their names with the schema name as a prefix, or by setting a search path that includes the desired schema. When creating named objects, you can also use the schema name as a prefix. + +Optionally, **CREATE SCHEMA** can include sub-commands to create objects within the new schema. The sub-commands are treated essentially the same as separate commands issued after creating the schema. If the **AUTHORIZATION** clause is used, all the created objects are owned by this user. + +## Precautions + +- Only a user with the **CREATE** permission on the current database can perform this operation. +- The owner of an object created by a system administrator in a schema with the same name as a common user is the common user, not the system administrator. + +## Syntax + +- Create a schema based on a specified name. + + ``` + CREATE SCHEMA schema_name + [ AUTHORIZATION user_name ] [ schema_element [ ... ] ]; + ``` + +- Create a schema based on a username. + + ``` + CREATE SCHEMA AUTHORIZATION user_name [ schema_element [ ... ] ]; + ``` + + +## Parameter Description + +- **schema\_name** + + Specifies the schema name. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >The name must be unique. + >The schema name cannot start with **pg\_**. + + Value range: a string. It must comply with the naming convention rule. + +- **AUTHORIZATION user\_name** + + Specifies the owner of a schema. If **schema\_name** is not specified, **user\_name** will be used as the schema name. In this case, **user\_name** can only be a role name. + + Value range: an existing username or role name + +- **schema\_element** + + Specifies an SQL statement defining an object to be created within the schema. Currently, only **CREATE TABLE**, **CREATE VIEW**, **CREATE INDEX**, **CREATE PARTITION**, and **GRANT** are accepted as clauses within **CREATE SCHEMA**. + + Objects created by sub-commands are owned by the user specified by **AUTHORIZATION**. + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If objects in the schema on the current search path are with the same name, specify the schemas for different objects. You can run **SHOW SEARCH\_PATH** to check the schemas on the current search path. + +## Examples + +``` +-- Create the role1 role. +postgres=# CREATE ROLE role1 IDENTIFIED BY 'Bigdata@123'; + +-- Create a schema named role1 for the role1 role. The owner of the films and winners tables created by the clause is role1. 
+postgres=# CREATE SCHEMA AUTHORIZATION role1 + CREATE TABLE films (title text, release date, awards text[]) + CREATE VIEW winners AS + SELECT title, release FROM films WHERE awards IS NOT NULL; + +-- Delete the schema. +postgres=# DROP SCHEMA role1 CASCADE; +-- Delete the user. +postgres=# DROP USER role1 CASCADE; +``` + +## Helpful Links + +[ALTER SCHEMA](alter-schema.md) and [DROP SCHEMA](drop-schema.md) + diff --git a/content/en/docs/Developerguide/create-sequence.md b/content/en/docs/Developerguide/create-sequence.md new file mode 100644 index 000000000..dba8d26b5 --- /dev/null +++ b/content/en/docs/Developerguide/create-sequence.md @@ -0,0 +1,140 @@ +# CREATE SEQUENCE + +## Function + +**CREATE SEQUENCE** adds a sequence to the current database. The owner of a sequence is the user who creates the sequence. + +## Precautions + +- A sequence is a special table that stores arithmetic progressions. It has no actual meaning and is usually used to generate unique identifiers for rows or tables. +- If a schema name is given, the sequence is created in the specified schema; otherwise, it is created in the current schema. The sequence name must be different from the names of other sequences, tables, indexes, views in the same schema. +- After the sequence is created, functions **nextval\(\)** and **generate\_series\(1,N\)** insert data to the table. Make sure that the number of times for invoking **nextval** is greater than or equal to N+1. Otherwise, errors will be reported because the number of times for invoking function **generate\_series\(\)** is N+1. + +## Syntax + +``` +CREATE SEQUENCE name [ INCREMENT [ BY ] increment ] + [ MINVALUE minvalue | NO MINVALUE | NOMINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE] + [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE | NOCYCLE ] + [ OWNED BY { table_name.column_name | NONE } ]; +``` + +## Parameter Description + +- **name** + + Specifies the name of a sequence to be created. + + Value range: a sting containing only lowercase letters, uppercase letters, special characters \#\_$, and digits + +- **increment** + + Specifies the step for a sequence. A positive number generates an ascending sequence, and a negative number generates a decreasing sequence. + + The default value is **1**. + +- **MINVALUE minvalue | NO MINVALUE| NOMINVALUE** + + Specifies the minimum value of the sequence. If **MINVALUE** is not declared, or **NO MINVALUE** is declared, the default value of the ascending sequence is **1**, and that of the descending sequence is **-263-1**. **NOMINVALUE** is equivalent to **NO MINVALUE**. + +- **MAXVALUE maxvalue | NO MAXVALUE| NOMAXVALUE** + + Specifies the maximum value of the sequence. If **MAXVALUE** is not declared, or **NO MAXVALUE** is declared, the default value of the ascending sequence is **263-1**, and that of the descending sequence is **-1**. **NOMAXVALUE** is equivalent to **NO MAXVALUE**. + +- **start** + + Specifies the start value of the sequence. The default value for an ascending sequence is **minvalue** and that for a descending sequence is **maxvalue**. + +- **cache** + + Specifies the number of sequences stored in the memory for quick access purposes. + + Default value **1** indicates that one sequence can be generated each time. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >It is not recommended that you define **cache** and **maxvalue** or **minvalue** at the same time. 
The continuity of sequences cannot be ensured after **cache** is defined because unacknowledged sequences may be generated, causing waste of sequences. + +- **CYCLE** + + Recycles sequences after the number of sequences reaches **maxvalue** or **minvalue**. + + If **NO CYCLE** is specified, any invocation of **nextval** would return an error after the number of sequences reaches **maxvalue** or **minvalue**. + + **NOCYCLE** is equivalent to **NO CYCLE**. + + The default value is **NO CYCLE**. + + If **CYCLE** is specified, the sequence uniqueness cannot be ensured. + +- **OWNED BY**- + + Associates a sequence with a specified column included in a table. In this way, the sequence will be deleted when you delete its associated column or the table where the column belongs to. The associated table and sequence must be owned by the same user and in the same schema. **OWNED BY** only establishes the association between a table column and the sequence. Sequences on the column do not increase automatically when data is inserted. + + The default value **OWNED BY NONE** indicates that such association does not exist. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >You are not advised to use the sequence created using **OWNED BY** in other tables. If multiple tables need to share a sequence, the sequence must not belong to a specific table. + + +## Examples + +Create an ascending sequence named **serial**, which starts from 101. + +``` +postgres=# CREATE SEQUENCE serial + START 101 + CACHE 20; +``` + +Select the next number from the sequence. + +``` +postgres=# SELECT nextval('serial'); + nextval + --------- + 101 +``` + +Select the next number from the sequence. + +``` +postgres=# SELECT nextval('serial'); + nextval + --------- + 102 +``` + +Create a sequence associated with the table. + +``` +postgres=# CREATE TABLE customer_address +( + ca_address_sk integer not null, + ca_address_id char(16) not null, + ca_street_number char(10) , + ca_street_name varchar(60) , + ca_street_type char(15) , + ca_suite_number char(10) , + ca_city varchar(60) , + ca_county varchar(30) , + ca_state char(2) , + ca_zip char(10) , + ca_country varchar(20) , + ca_gmt_offset decimal(5,2) , + ca_location_type char(20) +); + +postgres=# CREATE SEQUENCE serial1 + START 101 + CACHE 20 +OWNED BY customer_address.ca_address_sk; +-- Delete the sequence. +postgres=# DROP TABLE customer_address; +postgres=# DROP SEQUENCE serial cascade; +postgres=# DROP SEQUENCE serial1 cascade; +``` + +## Helpful Links + +[DROP SEQUENCE](drop-sequence.md) and [ALTER SEQUENCE](alter-sequence.md) + diff --git a/content/en/docs/Developerguide/create-synonym.md b/content/en/docs/Developerguide/create-synonym.md new file mode 100644 index 000000000..ce49dd681 --- /dev/null +++ b/content/en/docs/Developerguide/create-synonym.md @@ -0,0 +1,107 @@ +# CREATE SYNONYM + +## Function + +**CREATE SYNONYM** creates a synonym object. A synonym is an alias of a database object and is used to record the mapping between database object names. You can use synonyms to access associated database objects. + +## Precautions + +- The user of a synonym should be its owner. +- If the schema name is specified, create a synonym in the specified schema. Otherwise create a synonym in the current schema. +- Database objects that can be accessed using synonyms include tables, views, functions, and stored procedures. +- To use synonyms, you must have the required permissions on associated objects. 
+- The following DML statements support synonyms: **SELECT**, **INSERT**, **UPDATE**, **DELETE**, **EXPLAIN**, and **CALL**. +- The **CREATE SYNONYM** statement of an associated function or stored procedure cannot be used in a stored procedure. You are advised to use synonyms existing in the **pg\_synonym** system catalog in the stored procedure. + +## Syntax + +``` +CREATE [ OR REPLACE ] SYNONYM synonym_name + FOR object_name; +``` + +## Parameter Description + +- **synonym** + + Specifies the name of the synonym to be created, which can contain the schema name. + + Value range: a string. It must comply with the naming convention. + +- **object\_name** + + Specifies the name of an object that is associated \(optionally with schema names\). + + Value range: a string. It must comply with the naming convention. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**object\_name** can be the name of an object that does not exist. + + +## Example + +``` +-- Create schema ot. +postgres=# CREATE SCHEMA ot; + +-- Create table ot.t1 and its synonym t1. +postgres=# CREATE TABLE ot.t1(id int, name varchar2(10)); +postgres=# CREATE OR REPLACE SYNONYM t1 FOR ot.t1; + +-- Use synonym t1. +postgres=# SELECT * FROM t1; +postgres=# INSERT INTO t1 VALUES (1, 'ada'), (2, 'bob'); +postgres=# UPDATE t1 SET t1.name = 'cici' WHERE t1.id = 2; + +-- Create synonym v1 and its associated view ot.v_t1. +postgres=# CREATE SYNONYM v1 FOR ot.v_t1; +postgres=# CREATE VIEW ot.v_t1 AS SELECT * FROM ot.t1; + +-- Use synonym v1. +postgres=# SELECT * FROM v1; + +-- Create overloaded function ot.add and its synonym add. +postgres=# CREATE OR REPLACE FUNCTION ot.add(a integer, b integer) RETURNS integer AS +$$ +SELECT $1 + $2 +$$ +LANGUAGE sql; + +postgres=# CREATE OR REPLACE FUNCTION ot.add(a decimal(5,2), b decimal(5,2)) RETURNS decimal(5,2) AS +$$ +SELECT $1 + $2 +$$ +LANGUAGE sql; + +postgres=# CREATE OR REPLACE SYNONYM add FOR ot.add; + +-- Use synonym add. +postgres=# SELECT add(1,2); +postgres=# SELECT add(1.2,2.3); + +-- Create stored procedure ot.register and its synonym register. +postgres=# CREATE PROCEDURE ot.register(n_id integer, n_name varchar2(10)) +SECURITY INVOKER +AS +BEGIN + INSERT INTO ot.t1 VALUES(n_id, n_name); +END; +/ + +postgres=# CREATE OR REPLACE SYNONYM register FOR ot.register; + +-- Use synonym register to invoke the stored procedure. +postgres=# CALL register(3,'mia'); + +-- Delete the synonym. +postgres=# DROP SYNONYM t1; +postgres=# DROP SYNONYM IF EXISTS v1; +postgres=# DROP SYNONYM IF EXISTS add; +postgres=# DROP SYNONYM register; +postgres=# DROP SCHEMA ot CASCADE; +``` + +## Helpful Links + +[ALTER SYNONYM](alter-synonym.md) and [DROP SYNONYM](drop-synonym.md) + diff --git a/content/en/docs/Developerguide/create-table-as.md b/content/en/docs/Developerguide/create-table-as.md new file mode 100644 index 000000000..ffa2b3c46 --- /dev/null +++ b/content/en/docs/Developerguide/create-table-as.md @@ -0,0 +1,129 @@ +# CREATE TABLE AS + +## Function + +**CREATE TABLE AS** creates a table from the results of a query. + +It creates a table and fills it with data obtained using **SELECT**. The table columns have the names and data types associated with the output columns of **SELECT** \(except that you can override the **SELECT** output column names by giving an explicit list of new column names\). + +**CREATE TABLE AS** queries a source table once and writes the data in a new table. The result in the query view changes with the source table. 
In contrast, the view re-computes and defines its **SELECT** statement at each query. + +## Precautions + +- This statement cannot be used to create a partitioned table. +- If an error occurs during table creation, after it is fixed, the system may fail to delete the disk files that are created before the last automatic clearance and whose size is not 0. This problem seldom occurs and does not affect system running of the database. + +## Syntax + +``` +CREATE [ UNLOGGED ] TABLE table_name + [ (column_name [, ...] ) ] + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ] + AS query + [ WITH [ NO ] DATA ]; +``` + +## Parameter Description + +- **UNLOGGED** + + Specifies that the table is created as an unlogged table. Data written to unlogged tables is not written to the WALs, which makes them considerably faster than ordinary tables. However, they are not crash-safe: an unlogged table is automatically truncated after a crash or unclean shutdown. The contents of an unlogged table are also not replicated to standby servers. Any indexes created on an unlogged table are automatically unlogged as well. + + - Usage scenario: Unlogged tables do not ensure data security. Users can back up data before using unlogged tables; for example, users should back up the data before a system upgrade. + - Troubleshooting: If data is missing in the indexes of unlogged tables due to some unexpected operations such as an unclean shutdown, users should re-create the indexes with errors. + +- **table\_name** + + Specifies the name of the table to be created. + + Value range: a string. It must comply with the naming convention. + +- **column\_name** + + Specifies the name of a column to be created in the new table. + + Value range: a string. It must comply with the naming convention. + +- **WITH \( storage\_parameter \[= value\] \[, ... \] \)** + + Specifies an optional storage parameter for a table or an index. See details of parameters below. + + - FILLFACTOR + + The fill factor of a table is a percentage from 10 to 100. **100** \(complete filling\) is the default value. When a smaller fill factor is specified, **INSERT** operations pack table pages only to the indicated percentage. The remaining space on each page is reserved for updating rows on that page. This gives **UPDATE** a chance to place the updated copy of a row on the same page, which is more efficient than placing it on a different page. For a table whose entries are never updated, setting the fill factor to **100** \(complete filling\) is the best choice, but in heavily updated tables a smaller fill factor would be appropriate. The parameter is only valid for row–store tables. + + Value range: 10–100 + + - ORIENTATION + + Value range: + + **COLUMN**: The data will be stored in columns. + + **ROW** \(default value\): The data will be stored in rows. + + - COMPRESSION + + Specifies the compression level of table data. It determines the compression ratio and time. Generally, the higher the level of compression, the higher the ratio, the longer the time; and the lower the level of compression, the lower the ratio, the shorter the time. The actual compression ratio depends on the distribution mode of table data loaded. + + Value range: + + The valid values for column-store tables are **YES**, **NO**, **LOW**, **MIDDLE**, and **HIGH**, and the default value is **LOW**. + + Valid values for row-store tables are **YES** and **NO**, and the default value is **NO**. 
+ + - MAX\_BATCHROW + + Specifies the maximum number of rows in a storage unit during data loading. The parameter is only valid for column-store tables. + + Value range: 10000 to 60000 + + +- **COMPRESS / NOCOMPRESS** + + Specifies keyword **COMPRESS** during the creation of a table, so that the compression feature is triggered in case of bulk **INSERT** operations. If this feature is enabled, a scan is performed for all tuple data within the page to generate a dictionary and then the tuple data is compressed and stored. If **NOCOMPRESS** is specified, the table is not compressed. + + Default value: **NOCOMPRESS**, that is, tuple data is not compressed before storage. + +- **TABLESPACE tablespace\_name** + + Specifies that the new table will be created in the **tablespace\_name** tablespace. If not specified, the default tablespace is used. + +- **AS query** + + Specifies a **SELECT** or **VALUES** command, or an **EXECUTE** command that runs a prepared **SELECT** or **VALUES** query. + +- **\[ WITH \[ NO \] DATA \]** + + Specifies whether the data produced by the query should be copied to the new table. By default, the data will be copied. If the value **NO** is used, only the table structure will be copied. + + +## Example: + +``` +-- Create the tpcds.store_returns table. +postgres=# CREATE TABLE tpcds.store_returns +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + sr_item_sk VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER +); +-- Create the tpcds.store_returns_t1 table and insert numbers that are greater than 16 in the sr_item_sk column of the tpcds.store_returns table. +postgres=# CREATE TABLE tpcds.store_returns_t1 AS SELECT * FROM tpcds.store_returns WHERE sr_item_sk > '4795'; + +-- Copy tpcds.store_returns to create the tpcds.store_returns_t2 table. +postgres=# CREATE TABLE tpcds.store_returns_t2 AS table tpcds.store_returns; + +-- Delete the table. +postgres=# DROP TABLE tpcds.store_returns_t1 ; +postgres=# DROP TABLE tpcds.store_returns_t2 ; +postgres=# DROP TABLE tpcds.store_returns; +``` + +## Helpful Links + +[CREATE TABLE](create-table.md) and [SELECT](select.md) + diff --git a/content/en/docs/Developerguide/create-table-partition.md b/content/en/docs/Developerguide/create-table-partition.md new file mode 100644 index 000000000..5f8e7d1ab --- /dev/null +++ b/content/en/docs/Developerguide/create-table-partition.md @@ -0,0 +1,637 @@ +# CREATE TABLE PARTITION + +## Function + +**CREATE TABLE PARTITION** creates a partitioned table. A partitioned table is a logical table that is divided into several physical partitions for storage based on a specific plan. Data is stored in physical partitions not the logical table. + +The common forms of partitioning include range partitioning, hash partitioning, list partitioning, and value partitioning. Currently, the system supports only range partitioning for row-store and column-store tables. + +In range partitioning, a table is partitioned based on ranges defined by values in one or more columns, with no overlap between the ranges of values assigned to different partitions. Each range has a dedicated partition for data storage. + +The range partitioning policy refers to how data is inserted into partitions. Currently, range partitioning only allows the use of the range partitioning policy. + +In range partitioning, a table is partitioned based on partition key values. If a record can be mapped to a partition, it is inserted into the partition; if it cannot, an error message is returned. 
Range partitioning is the most commonly used partitioning policy. + +Partitioning can provide several benefits: + +- Query performance can be improved drastically in certain situations, particularly when most of the heavily accessed rows of the table are in a single partition or a small number of partitions. Partitioning narrows the range of data search and improves data access efficiency. +- In the case of an insert or update operation on most portions of a single partition, performance can be improved by taking advantage of continuous scan of that partition instead of partitions scattered across the whole table. +- Frequent loading or deletion operations on records in a separate partition can be accomplished by reading or removing that partition. It also entirely avoids the **VACUUM** overload caused by bulk **DELETE** operations \(only for range partitioning\). + +## Precautions + +A partitioned table supports unique and primary key constraints. The constraint keys of these constraints must contain all partition keys. + +## Syntax + +``` +CREATE TABLE [ IF NOT EXISTS ] partition_table_name +( [ + { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint + | LIKE source_table [ like_option [...] ] }[, ... ] +] ) + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ] + PARTITION BY { + {RANGE (partition_key) ( partition_less_than_item [, ... ] )} | + {RANGE (partition_key) ( partition_start_end_item [, ... ] )} + } [ { ENABLE | DISABLE } ROW MOVEMENT ]; +``` + +- **column\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { NOT NULL | + NULL | + CHECK ( expression ) | + DEFAULT default_expr | + UNIQUE index_parameters | + PRIMARY KEY index_parameters } + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + +- **table\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { CHECK ( expression ) | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters} + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + +- **like\_option** is as follows: + + ``` + { INCLUDING | EXCLUDING } { DEFAULTS | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS| ALL } + ``` + + +- **index\_parameters** is as follows: + + ``` + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ USING INDEX TABLESPACE tablespace_name ] + ``` + + +- partition\_less\_than\_item: + + ``` + PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } ) [TABLESPACE tablespace_name] + ``` + +- partition\_start\_end\_item: + + ``` + PARTITION partition_name { + {START(partition_value) END (partition_value) EVERY (interval_value)} | + {START(partition_value) END ({partition_value | MAXVALUE})} | + {START(partition_value)} | + {END({partition_value | MAXVALUE})} + } [TABLESPACE tablespace_name] + ``` + + +## Parameter Description + +- **IF NOT EXISTS** + + Sends a notice, but does not throw an error, if a table with the same name exists. + +- **partition\_table\_name** + + Specifies the name of the partitioned table. + + Value range: a string. It must comply with the naming convention. + +- **column\_name** + + Specifies the name of a column to be created in the new table. + + Value range: a string. It must comply with the naming convention. + +- **data\_type** + + Specifies the data type of the column. 
+ +- **COLLATE collation** + + Assigns a collation to the column \(which must be of a collatable data type\). If no collation is specified, the default collation is used. + +- **CONSTRAINT constraint\_name** + + Specifies the name of a column or table constraint. The optional constraint clauses specify constraints that new or updated rows must satisfy for an insert or update operation to succeed. + + There are two ways to define constraints: + + - A column constraint is defined as part of a column definition, and it is bound to a particular column. + - A table constraint is not bound to a particular column but can apply to more than one column. + +- **LIKE source\_table \[ like\_option ... \]** + + Specifies a table from which the new table automatically copies all column names, their data types, and their not-null constraints. + + Unlike **INHERITS**, the new table and original table are decoupled after creation is complete. Changes to the original table will not be applied to the new table, and it is not possible to include data of the new table in scans of the original table. + + Default expressions for the copied column definitions will be copied only if **INCLUDING DEFAULTS** is specified. The default behavior is to exclude default expressions, resulting in the copied columns in the new table having default values **null**. + + Not-null constraints are always copied to the new table. **CHECK** constraints will only be copied if **INCLUDING CONSTRAINTS** is specified; other types of constraints will never be copied. These rules also apply to column constraints and table constraints. + + Unlike those of **INHERITS**, columns and constraints copied by **LIKE** are not merged with similarly named columns and constraints. If the same name is specified explicitly or in another **LIKE** clause, an error is reported. + + - Any indexes on the original table will not be created on the new table, unless the **INCLUDING INDEXES** clause is specified. + - **STORAGE** settings for the copied column definitions are copied only if **INCLUDING STORAGE** is specified. The default behavior is to exclude **STORAGE** settings. + - If **INCLUDING COMMENTS** is specified, comments for the copied columns, constraints, and indexes are copied. The default behavior is to exclude comments. + - If **INCLUDING RELOPTIONS** is specified, the new table will copy the storage parameter \(that is, **WITH** clause\) of the source table. The default behavior is to exclude partition definition of the storage parameter of the source table. + - **INCLUDING ALL** contains the meaning of **INCLUDING DEFAULTS**, **INCLUDING CONSTRAINTS**, **INCLUDING INDEXES**, **INCLUDING STORAGE**, **INCLUDING COMMENTS**, **INCLUDING PARTITION**, and **INCLUDING RELOPTIONS**. + +- **WITH \( storage\_parameter \[= value\] \[, ... \] \)** + + Specifies an optional storage parameter for a table or an index. Optional parameters are as follows: + + - FILLFACTOR + + The fill factor of a table is a percentage from 10 to 100. **100** \(complete filling\) is the default value. When a smaller fill factor is specified, **INSERT** operations pack table pages only to the indicated percentage. The remaining space on each page is reserved for updating rows on that page. This gives **UPDATE** a chance to place the updated copy of a row on the same page, which is more efficient than placing it on a different page. 
For a table whose entries are never updated, setting the fill factor to **100** \(complete filling\) is the best choice, but in heavily updated tables a smaller fill factor would be appropriate. The parameter has no meaning for column–store tables. + + Value range: 10–100 + + - ORIENTATION + + Determines the storage mode of the data in the table. + + Value range: + + - **COLUMN**: The data will be stored in columns. + - **ROW** \(default value\): The data will be stored in rows. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >**orientation** cannot be modified. + + + - COMPRESSION + - Valid values for column-store tables are **LOW**, **MIDDLE**, **HIGH**, **YES**, and **NO**, and the compression level increases accordingly. The default is **LOW**. + - Valid values for row-store tables are **YES** and **NO**, and the default value is **NO**. + + - MAX\_BATCHROW + + Specifies the maximum number of rows in a storage unit during data loading. The parameter is only valid for column-store tables. + + Value range: 10000 to 60000 + + - PARTIAL\_CLUSTER\_ROWS + + Specifies the number of records to be partially clustered for storage during data loading. The parameter is only valid for column-store tables. + + Value range: a number greater than or equal to 100000 The value is a multiple of _MAX\_BATCHROW_. + + - DELTAROW\_THRESHOLD + + A reserved parameter. The parameter is only valid for column-store tables. + + Value range: 0 to 9999 + + +- **COMPRESS / NOCOMPRESS** + + Specifies keyword **COMPRESS** during the creation of a table, so that the compression feature is triggered in case of bulk **INSERT** operations. If this feature is enabled, a scan is performed for all tuple data within the page to generate a dictionary and then the tuple data is compressed and stored. If **NOCOMPRESS** is specified, the table is not compressed. + + Default value: **NOCOMPRESS**, that is, tuple data is not compressed before storage. + +- **TABLESPACE tablespace\_name** + + Specifies that the new table will be created in the **tablespace\_name** tablespace. If not specified, the default tablespace is used. + +- **PARTITION BY RANGE\(partition\_key\)** + + Creates a range partition. **partition\_key** is the name of the partition key. + + \(1\) Assume that the **VALUES LESS THAN** syntax is used. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >In this case, a maximum of four partition keys are supported. + + Data types supported by the partition keys are as follows: **SMALLINT**, **INTEGER**, **BIGINT**, **DECIMAL**, **NUMERIC**, **REAL**, **DOUBLE PRECISION**, **CHARACTER VARYING**\(_n_\), **VARCHAR**\(_n_\), **CHARACTER**\(_n_\), **CHAR**\(_n_\), **CHARACTER**, **CHAR**, **TEXT**, **NVARCHAR2**, **NAME**, **TIMESTAMP\[\(p\)\] \[WITHOUT TIME ZONE\]**, **TIMESTAMP\[\(p\)\] \[WITH TIME ZONE\]**, and **DATE**. + + \(2\) Assume that the **START END** syntax is used. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >In this case, only one partition key is supported. + + Data types supported by the partition key are as follows: **SMALLINT**, **INTEGER**, **BIGINT**, **DECIMAL**, **NUMERIC**, **REAL**, **DOUBLE PRECISION**, **TIMESTAMP\[\(p\)\] \[WITHOUT TIME ZONE\]**, **TIMESTAMP\[\(p\)\] \[WITH TIME ZONE\]**, and **DATE**. + +- **PARTITION partition\_name VALUES LESS THAN \( \{ partition\_value | MAXVALUE \} \)** + + Specifies the information of partitions. **partition\_name** is the name of a range partition. 
**partition\_value** is the upper limit of a range partition, and the value depends on the type of **partition\_key**. _MAXVALUE_ usually specifies the upper limit of the last range partition. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- Each partition requires an upper limit. + >- The data type of the upper limit must be the same as that of the partition key. + >- In a partition list, partitions are arranged in ascending order of upper limits. A partition with a smaller upper limit value is placed before another partition with a larger one. + +- **PARTITION partition\_name \{START \(partition\_value\) END \(partition\_value\) EVERY \(interval\_value\)\} | **\{START \(partition\_value\) END \(partition\_value|MAXVALUE\)\} | \{START\(partition\_value\)\} | **\{END \(partition\_value | MAXVALUE\)**\} + + Specifies the information of partitions. + + - **partition\_name**: name or name prefix of a range partition. It is the name prefix only in the following cases \(assuming that **partition\_name** is **p1**\): + - If **START**+**END**+**EVERY** is used, the names of partitions will be defined as **p1\_1**, **p1\_2**, and the like. For example, if **PARTITION p1 START\(1\) END\(4\) EVERY\(1\)** is defined, the generated partitions are \[1, 2\), \[2, 3\), and \[3, 4\), and their names are **p1\_1**, **p1\_2**, and **p1\_3**. In this case, **p1** is a name prefix. + - If the defined statement is in the first place and has **START** specified, the range \(_MINVALUE_, **START**\) will be automatically used as the first actual partition, and its name will be **p1\_0**. The other partitions are then named **p1\_1**, **p1\_2**, and the like. For example, if **PARTITION p1 START\(1\), PARTITION p2 START\(2\)** is defined, generated partitions are \(_MINVALUE_, 1\), \[1, 2\), and \[2, _MAXVALUE_\), and their names will be **p1\_0**, **p1\_1**, and **p2**. In this case, **p1** is a name prefix and **p2** is a partition name. **MINVALUE** means the minimum value. + + - **partition\_value**: start value or end value of a range partition. The value depends on **partition\_key** and cannot be _MAXVALUE_. + - **interval\_value**: width of each partition for dividing the \[**START**, **END**\) range. It cannot be _MAXVALUE_. If the value of \(**END** – **START**\) divided by **EVERY** has a remainder, the width of only the last partition is less than the value of **EVERY**. + - _MAXVALUE_: upper limit of the last range partition. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >1. If the defined statement is in the first place and has **START** specified, the range \(_MINVALUE_, **START**\) will be automatically used as the first actual partition. + >2. The **START END** syntax must comply with the following rules: + > - The value of **START** \(if any, same for the following situations\) in each **partition\_start\_end\_item** must be smaller than that of **END**. + > - In two adjacent **partition\_start\_end\_item** statements, the value of the first **END** must be equal to that of the second **START**. + > - The value of **EVERY** in each **partition\_start\_end\_item** must be a positive number \(in ascending order\) and must be smaller than **END** minus **START**. + > - Each partition includes the start value \(unless it is _MINVALUE_\) and excludes the end value. The format is as follows: \[**START**, **END**\). + > - Partitions created by the same **partition\_start\_end\_item** belong to the same tablespace. 
+ > - If **partition\_name** is a name prefix of a partition, the length must not exceed 57 bytes. If there are more than 57 bytes, the prefix will be automatically truncated. + > - When creating or modifying a partitioned table, ensure that the total number of partitions in the table does not exceed the maximum value \(32767\). + >3. In statements for creating partitioned tables, **START END** and **LESS THAN** cannot be used together. + >4. The **START END** syntax in a partitioned table creation SQL statement will be replaced by the **VALUES LESS THAN** syntax when **gs\_dump** is executed. + +- **\{ ENABLE | DISABLE \} ROW MOVEMENT** + + Sets row movement. + + If the tuple value is updated on the partition key during the **UPDATE** action, the partition where the tuple is located is altered. Setting this parameter enables error messages to be reported or movement of the tuple between partitions. + + Value range: + + - **ENABLE** \(default value\): Row movement is enabled. + - **DISABLE**: Row movement is disabled. + + +- **NOT NULL** + + The column is not allowed to contain null values. **ENABLE** can be omitted. + +- **NULL** + + Specifies that the column is allowed to contain null values. This is the default setting. + + This clause is only provided for compatibility with non-standard SQL databases. It is not recommended. + +- **CHECK \(condition\) \[ NO INHERIT \]** + + Specifies an expression producing a Boolean result where the insert or update operation of new or updated rows can succeed only when the expression result is **TRUE** or **UNKNOWN**; otherwise, an error is thrown and the database is not altered. + + A check constraint specified as a column constraint should reference only the column's values, while an expression appearing in a table constraint can reference multiple columns. + + A constraint marked with **NO INHERIT** will not propagate to child tables. + + **ENABLE** can be omitted. + +- **DEFAULT default\_expr** + + Assigns a default data value for a column. The value can be any variable-free expressions. \(Subqueries and cross-references to other columns in the current table are not allowed.\) The data type of the default expression must match the data type of the column. + + The default expression will be used in any insert operation that does not specify a value for the column. If there is no default value for a column, then the default value is null. + +- **UNIQUE index\_parameters** + + **UNIQUE \( column\_name \[, ... \] \) index\_parameters** + + Specifies that a group of one or more columns of a table can contain only unique values. + + For the purpose of a unique constraint, null is not considered equal. + +- **PRIMARY KEY index\_parameters** + + **PRIMARY KEY \( column\_name \[, ... \] \) index\_parameters** + + Specifies that a column or columns of a table can contain only unique \(non-duplicate\) and non-null values. + + Only one primary key can be specified for a table. + +- **DEFERRABLE | NOT DEFERRABLE** + + They determine whether the constraint is deferrable. A constraint that is not deferrable will be checked immediately after every command. Checking of constraints that are deferrable can be postponed until the end of the transaction using the **SET CONSTRAINTS** command. **NOT DEFERRABLE** is the default value. Currently, only **UNIQUE** and **PRIMARY KEY** constraints accept this clause. All the other constraints are not deferrable. 
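+
+    A minimal sketch, assuming an illustrative table named orders_p with a deferrable unique constraint (the constraint key includes the partition key, as required by the precautions above):
+
+    ```
+    postgres=# CREATE TABLE orders_p
+    (
+        O_ORDER_SK INTEGER,
+        O_DATE_SK  INTEGER,
+        CONSTRAINT orders_p_uk UNIQUE (O_DATE_SK, O_ORDER_SK) DEFERRABLE
+    )
+    PARTITION BY RANGE (O_DATE_SK)
+    (
+        PARTITION P1 VALUES LESS THAN (2451179),
+        PARTITION P2 VALUES LESS THAN (MAXVALUE)
+    );
+
+    -- Postpone the uniqueness check until the end of the transaction.
+    postgres=# START TRANSACTION;
+    postgres=# SET CONSTRAINTS orders_p_uk DEFERRED;
+    postgres=# COMMIT;
+
+    postgres=# DROP TABLE orders_p;
+    ```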
+ +- **INITIALLY IMMEDIATE | INITIALLY DEFERRED** + + If a constraint is deferrable, this clause specifies the default time to check the constraint. + + - If the constraint is **INITIALLY IMMEDIATE** \(default value\), it is checked after each statement. + - If the constraint is **INITIALLY DEFERRED**, it is checked only at the end of the transaction. + + The constraint check time can be altered using the **SET CONSTRAINTS** statement. + +- **USING INDEX TABLESPACE tablespace\_name** + + Allows selection of the tablespace in which the index associated with a **UNIQUE** or **PRIMARY KEY** constraint will be created. If not specified, **default\_tablespace** is consulted, or the default tablespace in the database if **default\_tablespace** is empty. + + +## Examples + +- Example 1: Create a range-partitioned table **tpcds.web\_returns\_p1**. The table has eight partitions and their partition keys are of the integer type. The ranges of the partitions are: wr\_returned\_date\_sk < 2450815, 2450815 ≤ wr\_returned\_date\_sk < 2451179, 2451179 ≤ wr\_returned\_date\_sk < 2451544, 2451544 ≤ wr\_returned\_date\_sk < 2451910, 2451910 ≤ wr\_returned\_date\_sk < 2452275, 2452275 ≤ wr\_returned\_date\_sk < 2452640, 2452640 ≤ wr\_returned\_date\_sk < 2453005, and wr\_returned\_date\_sk ≥ 2453005. + + ``` + -- Create the tpcds.web_returns table. + postgres=# CREATE TABLE tpcds.web_returns + ( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) + ); + -- Create a range-partitioned table tpcds.web_returns_p1. + postgres=# CREATE TABLE tpcds.web_returns_p1 + ( + WR_RETURNED_DATE_SK INTEGER , + WR_RETURNED_TIME_SK INTEGER , + WR_ITEM_SK INTEGER NOT NULL, + WR_REFUNDED_CUSTOMER_SK INTEGER , + WR_REFUNDED_CDEMO_SK INTEGER , + WR_REFUNDED_HDEMO_SK INTEGER , + WR_REFUNDED_ADDR_SK INTEGER , + WR_RETURNING_CUSTOMER_SK INTEGER , + WR_RETURNING_CDEMO_SK INTEGER , + WR_RETURNING_HDEMO_SK INTEGER , + WR_RETURNING_ADDR_SK INTEGER , + WR_WEB_PAGE_SK INTEGER , + WR_REASON_SK INTEGER , + WR_ORDER_NUMBER BIGINT NOT NULL, + WR_RETURN_QUANTITY INTEGER , + WR_RETURN_AMT DECIMAL(7,2) , + WR_RETURN_TAX DECIMAL(7,2) , + WR_RETURN_AMT_INC_TAX DECIMAL(7,2) , + WR_FEE DECIMAL(7,2) , + WR_RETURN_SHIP_COST DECIMAL(7,2) , + WR_REFUNDED_CASH DECIMAL(7,2) , + WR_REVERSED_CHARGE DECIMAL(7,2) , + WR_ACCOUNT_CREDIT DECIMAL(7,2) , + WR_NET_LOSS DECIMAL(7,2) + ) + WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE) + PARTITION BY RANGE(WR_RETURNED_DATE_SK) + ( + PARTITION P1 VALUES LESS THAN(2450815), + PARTITION P2 VALUES LESS THAN(2451179), + PARTITION P3 VALUES LESS THAN(2451544), + PARTITION P4 VALUES LESS THAN(2451910), + PARTITION P5 VALUES LESS THAN(2452275), + PARTITION P6 VALUES LESS THAN(2452640), + PARTITION P7 VALUES LESS THAN(2453005), + PARTITION P8 VALUES LESS THAN(MAXVALUE) + ); + + -- Import data from the example data table. + postgres=# INSERT INTO tpcds.web_returns_p1 SELECT * FROM tpcds.web_returns; + + -- Delete the P8 partition. + postgres=# ALTER TABLE tpcds.web_returns_p1 DROP PARTITION P8; + + -- Add a partition WR_RETURNED_DATE_SK with values ranging from 2453005 to 2453105. 
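+    -- (After P8 is dropped, the highest remaining boundary is 2453005, so the new partition holds values from 2453005 up to, but not including, 2453105.)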
+ postgres=# ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P8 VALUES LESS THAN (2453105); + + -- Add a partition WR_RETURNED_DATE_SK with values ranging from 2453105 to MAXVALUE. + postgres=# ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P9 VALUES LESS THAN (MAXVALUE); + + -- Delete the P8 partition. + postgres=# ALTER TABLE tpcds.web_returns_p1 DROP PARTITION FOR (2453005); + + -- Rename the P7 partition to P10. + postgres=# ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION P7 TO P10; + + -- Rename the P6 partition to P11. + postgres=# ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION FOR (2452639) TO P11; + + -- Query the number of rows in the P10 partition. + postgres=# SELECT count(*) FROM tpcds.web_returns_p1 PARTITION (P10); + count + -------- + 0 + (1 row) + + -- Query the number of rows in the P1 partition. + postgres=# SELECT COUNT(*) FROM tpcds.web_returns_p1 PARTITION FOR (2450815); + count + -------- + 0 + (1 row) + ``` + +- Example 2: Create a range-partitioned table **tpcds.web\_returns\_p2**. The table has eight partitions and their partition keys are of the integer type. The upper limit of the eighth partition is _MAXVALUE_. + + The ranges of the partitions are: wr\_returned\_date\_sk < 2450815, 2450815 ≤ wr\_returned\_date\_sk < 2451179, 2451179 ≤ wr\_returned\_date\_sk < 2451544, 2451544 ≤ wr\_returned\_date\_sk < 2451910, 2451910 ≤ wr\_returned\_date\_sk < 2452275, 2452275 ≤ wr\_returned\_date\_sk < 2452640, 2452640 ≤ wr\_returned\_date\_sk < 2453005, and wr\_returned\_date\_sk ≥ 2453005. + + The tablespace of the **tpcds.web\_returns\_p2** partitioned table is **example1**. Partitions **P1** to **P7** have no specified tablespaces, and use the **example1** tablespace of the **tpcds.web\_returns\_p2** partitioned table. The tablespace of the **P8** partitioned table is **example2**. + + Assume that the following data directories of the database nodes are empty directories for which user **dwsadmin** has the read and write permissions: **/pg\_location/mount1/path1**, **/pg\_location/mount2/path2**, **/pg\_location/mount3/path3**, and **/pg\_location/mount4/path4**. 
+ + ``` + postgres=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; + postgres=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; + postgres=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; + postgres=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; + + postgres=# CREATE TABLE tpcds.web_returns_p2 + ( + WR_RETURNED_DATE_SK INTEGER , + WR_RETURNED_TIME_SK INTEGER , + WR_ITEM_SK INTEGER NOT NULL, + WR_REFUNDED_CUSTOMER_SK INTEGER , + WR_REFUNDED_CDEMO_SK INTEGER , + WR_REFUNDED_HDEMO_SK INTEGER , + WR_REFUNDED_ADDR_SK INTEGER , + WR_RETURNING_CUSTOMER_SK INTEGER , + WR_RETURNING_CDEMO_SK INTEGER , + WR_RETURNING_HDEMO_SK INTEGER , + WR_RETURNING_ADDR_SK INTEGER , + WR_WEB_PAGE_SK INTEGER , + WR_REASON_SK INTEGER , + WR_ORDER_NUMBER BIGINT NOT NULL, + WR_RETURN_QUANTITY INTEGER , + WR_RETURN_AMT DECIMAL(7,2) , + WR_RETURN_TAX DECIMAL(7,2) , + WR_RETURN_AMT_INC_TAX DECIMAL(7,2) , + WR_FEE DECIMAL(7,2) , + WR_RETURN_SHIP_COST DECIMAL(7,2) , + WR_REFUNDED_CASH DECIMAL(7,2) , + WR_REVERSED_CHARGE DECIMAL(7,2) , + WR_ACCOUNT_CREDIT DECIMAL(7,2) , + WR_NET_LOSS DECIMAL(7,2) + ) + TABLESPACE example1 + PARTITION BY RANGE(WR_RETURNED_DATE_SK) + ( + PARTITION P1 VALUES LESS THAN(2450815), + PARTITION P2 VALUES LESS THAN(2451179), + PARTITION P3 VALUES LESS THAN(2451544), + PARTITION P4 VALUES LESS THAN(2451910), + PARTITION P5 VALUES LESS THAN(2452275), + PARTITION P6 VALUES LESS THAN(2452640), + PARTITION P7 VALUES LESS THAN(2453005), + PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 + ) + ENABLE ROW MOVEMENT; + + -- Create a partitioned table using LIKE. + postgres=# CREATE TABLE tpcds.web_returns_p3 (LIKE tpcds.web_returns_p2 INCLUDING PARTITION); + + -- Change the tablespace of the P1 partition to example2. + postgres=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P1 TABLESPACE example2; + + -- Change the tablespace of the P2 partition to example3. + postgres=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P2 TABLESPACE example3; + + -- Split the P8 partition at 2453010. + postgres=# ALTER TABLE tpcds.web_returns_p2 SPLIT PARTITION P8 AT (2453010) INTO + ( + PARTITION P9, + PARTITION P10 + ); + + -- Merge the P6 and P7 partitions into one. + postgres=# ALTER TABLE tpcds.web_returns_p2 MERGE PARTITIONS P6, P7 INTO PARTITION P8; + + -- Modify the migration attribute of the partitioned table. + postgres=# ALTER TABLE tpcds.web_returns_p2 DISABLE ROW MOVEMENT; + -- Delete tables and tablespaces. + postgres=# DROP TABLE tpcds.web_returns_p1; + postgres=# DROP TABLE tpcds.web_returns_p2; + postgres=# DROP TABLE tpcds.web_returns_p3; + postgres=# DROP TABLESPACE example1; + postgres=# DROP TABLESPACE example2; + postgres=# DROP TABLESPACE example3; + postgres=# DROP TABLESPACE example4; + ``` + + +- Example 3: Use **START END** to create and modify a range-partitioned table. + + Assume that **/home/omm/startend\_tbs1**, **/home/omm/startend\_tbs2**, **/home/omm/startend\_tbs3**, and **/home/omm/startend\_tbs4** are empty directories for which user omm has the read and write permissions. + + ``` + -- Create tablespaces. + postgres=# CREATE TABLESPACE startend_tbs1 LOCATION '/home/omm/startend_tbs1'; + postgres=# CREATE TABLESPACE startend_tbs2 LOCATION '/home/omm/startend_tbs2'; + postgres=# CREATE TABLESPACE startend_tbs3 LOCATION '/home/omm/startend_tbs3'; + postgres=# CREATE TABLESPACE startend_tbs4 LOCATION '/home/omm/startend_tbs4'; + + -- Create a temporary schema. 
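+    -- (The tpcds schema and the table created in it are removed at the end of this example with DROP SCHEMA ... CASCADE.)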
+ postgres=# CREATE SCHEMA tpcds; + postgres=# SET CURRENT_SCHEMA TO tpcds; + + -- Create a partitioned table with the partition key of the integer type. + postgres=# CREATE TABLE tpcds.startend_pt (c1 INT, c2 INT) + TABLESPACE startend_tbs1 + PARTITION BY RANGE (c2) ( + PARTITION p1 START(1) END(1000) EVERY(200) TABLESPACE startend_tbs2, + PARTITION p2 END(2000), + PARTITION p3 START(2000) END(2500) TABLESPACE startend_tbs3, + PARTITION p4 START(2500), + PARTITION p5 START(3000) END(5000) EVERY(1000) TABLESPACE startend_tbs4 + ) + ENABLE ROW MOVEMENT; + + -- View the information of the partitioned table. + postgres=# SELECT relname, boundaries, spcname FROM pg_partition p JOIN pg_tablespace t ON p.reltablespace=t.oid and p.parentid='tpcds.startend_pt'::regclass ORDER BY 1; + relname | boundaries | spcname + -------------+------------+--------------- + p1_0 | {1} | startend_tbs2 + p1_1 | {201} | startend_tbs2 + p1_2 | {401} | startend_tbs2 + p1_3 | {601} | startend_tbs2 + p1_4 | {801} | startend_tbs2 + p1_5 | {1000} | startend_tbs2 + p2 | {2000} | startend_tbs1 + p3 | {2500} | startend_tbs3 + p4 | {3000} | startend_tbs1 + p5_1 | {4000} | startend_tbs4 + p5_2 | {5000} | startend_tbs4 + startend_pt | | startend_tbs1 + (12 rows) + + -- Import data and check the data volume in a partition. + postgres=# INSERT INTO tpcds.startend_pt VALUES (GENERATE_SERIES(0, 4999), GENERATE_SERIES(0, 4999)); + postgres=# SELECT COUNT(*) FROM tpcds.startend_pt PARTITION FOR (0); + count + ------- + 1 + (1 row) + + postgres=# SELECT COUNT(*) FROM tpcds.startend_pt PARTITION (p3); + count + ------- + 500 + (1 row) + + -- Add partitions [5000, 5300), [5300, 5600), [5600, 5900), and [5900, 6000). + postgres=# ALTER TABLE tpcds.startend_pt ADD PARTITION p6 START(5000) END(6000) EVERY(300) TABLESPACE startend_tbs4; + + -- Add the partition p7, specified by MAXVALUE. + postgres=# ALTER TABLE tpcds.startend_pt ADD PARTITION p7 END(MAXVALUE); + + -- Rename the partition p7 to p8. + postgres=# ALTER TABLE tpcds.startend_pt RENAME PARTITION p7 TO p8; + + -- Delete the partition p8. + postgres=# ALTER TABLE tpcds.startend_pt DROP PARTITION p8; + + -- Rename the partition where 5950 is located to p71. + postgres=# ALTER TABLE tpcds.startend_pt RENAME PARTITION FOR(5950) TO p71; + + -- Split the partition [4000, 5000) where 4500 is located. + postgres=# ALTER TABLE tpcds.startend_pt SPLIT PARTITION FOR(4500) INTO(PARTITION q1 START(4000) END(5000) EVERY(250) TABLESPACE startend_tbs3); + + -- Change the tablespace of the partition p2 to startend_tbs4. + postgres=# ALTER TABLE tpcds.startend_pt MOVE PARTITION p2 TABLESPACE startend_tbs4; + + -- View the partition status. 
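+    -- (Relative to the earlier listing: p2 now resides in startend_tbs4, the [4000, 5000) partition has been split into q1_1 to q1_4, and the partitions produced by the ADD and RENAME operations appear as p6_1 to p6_3 and p71.)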
+ postgres=# SELECT relname, boundaries, spcname FROM pg_partition p JOIN pg_tablespace t ON p.reltablespace=t.oid and p.parentid='tpcds.startend_pt'::regclass ORDER BY 1; + relname | boundaries | spcname + -------------+------------+--------------- + p1_0 | {1} | startend_tbs2 + p1_1 | {201} | startend_tbs2 + p1_2 | {401} | startend_tbs2 + p1_3 | {601} | startend_tbs2 + p1_4 | {801} | startend_tbs2 + p1_5 | {1000} | startend_tbs2 + p2 | {2000} | startend_tbs4 + p3 | {2500} | startend_tbs3 + p4 | {3000} | startend_tbs1 + p5_1 | {4000} | startend_tbs4 + p6_1 | {5300} | startend_tbs4 + p6_2 | {5600} | startend_tbs4 + p6_3 | {5900} | startend_tbs4 + p71 | {6000} | startend_tbs4 + q1_1 | {4250} | startend_tbs3 + q1_2 | {4500} | startend_tbs3 + q1_3 | {4750} | startend_tbs3 + q1_4 | {5000} | startend_tbs3 + startend_pt | | startend_tbs1 + (19 rows) + + -- Delete tables and tablespaces. + postgres=# DROP SCHEMA tpcds CASCADE; + postgres=# DROP TABLESPACE startend_tbs1; + postgres=# DROP TABLESPACE startend_tbs2; + postgres=# DROP TABLESPACE startend_tbs3; + postgres=# DROP TABLESPACE startend_tbs4; + ``` + + +## Helpful Links + +[ALTER TABLE PARTITION](alter-table-partition.md) and [DROP TABLE](drop-table.md) + diff --git a/content/en/docs/Developerguide/create-table.md b/content/en/docs/Developerguide/create-table.md new file mode 100644 index 000000000..84e1aca14 --- /dev/null +++ b/content/en/docs/Developerguide/create-table.md @@ -0,0 +1,925 @@ +# CREATE TABLE + +## Function + +**CREATE TABLE** is used to create an initially empty table in the current database. The table will be owned by the creator. + +## Precautions + +- For details about the data types supported by column-store tables, see [Data Types Supported by Column-store Tables](data-types-supported-by-column-store-tables.md). +- It is recommended that the number of column-store tables do not exceed 1000. +- The primary key constraint and unique constraint in the table must contain distribution keys. +- If an error occurs during table creation, after it is fixed, the system may fail to delete the empty disk files created before the last automatic clearance. This problem seldom occurs and does not affect system running of the database. +- Column-store tables support only **PARTIAL CLUSTER KEY** table-level constraints, but do not support primary and foreign key table-level constraints. +- Only the **NULL**, **NOT NULL**, and **DEFAULT** constant values can be used as column-store table constraints. +- Whether column-store tables support a delta table is specified by the [enable\_delta\_store](parallel-data-import.md#en-us_topic_0237124705_section1035224982816) parameter. The threshold for storing data into a delta table is specified by the **deltarow\_threshold** parameter. +- When JDBC is used, the **DEFAULT** value can be set through **PrepareStatement**. + +## Syntax + +- Create a table. + + ``` + CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name + ({ column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint + | LIKE source_table [ like_option [...] ] } + [, ... ]) + [ WITH ( {storage_parameter = value} [, ... 
] ) ] + [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ]; + ``` + + - **column\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { NOT NULL | + NULL | + CHECK ( expression ) | + DEFAULT default_expr | + UNIQUE index_parameters | + PRIMARY KEY index_parameters } + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + - **compress\_mode** of a column is as follows: + + ``` + { DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS } + ``` + + - **table\_constraint** is as follows: + + ``` + [ CONSTRAINT constraint_name ] + { CHECK ( expression ) | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters | + PARTIAL CLUSTER KEY ( column_name [, ... ] ) } + [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + ``` + + - **like\_option** is as follows: + + ``` + { INCLUDING | EXCLUDING } { DEFAULTS | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | ALL } + ``` + + + +**index\_parameters** is as follows: + +``` +[ WITH ( {storage_parameter = value} [, ... ] ) ] +[ USING INDEX TABLESPACE tablespace_name ] +``` + +## Parameter Description + +- **UNLOGGED** + + If this keyword is specified, the created table is an unlogged table. Data written to unlogged tables is not written to the WALs, which makes them considerably faster than ordinary tables. However, an unlogged table is automatically truncated after a crash or unclean shutdown, incurring data loss risks. Contents of an unlogged table are also not replicated to standby servers. Any indexes created on an unlogged table are not automatically logged as well. + + Usage scenario: Unlogged tables do not ensure data security. Users can back up data before using unlogged tables; for example, users should back up the data before a system upgrade. + + Troubleshooting: If data is missing in the indexes of unlogged tables due to some unexpected operations such as an unclean shutdown, users should re-create the indexes with errors. + +- **GLOBAL | LOCAL** + + When creating a temporary table, you can specify the **GLOBAL** or **LOCAL** keyword before **TEMP** or **TEMPORARY**. Currently, the two keywords are used to be compatible with the SQL standard. In fact, a local temporary table will be created by openGauss regardless of whether **GLOBAL** or **LOCAL** is specified. + +- **TEMPORARY | TEMP** + + If **TEMP** or **TEMPORARY** is specified, the created table is a temporary table. A temporary table is automatically dropped at the end of the current session. Therefore, you can create and use temporary tables in the current session as long as the connected database node in the session is normal. Temporary tables are created only in the current session. If a DDL statement involves operations on temporary tables, a DDL error will be generated. Therefore, you are not advised to perform operations on temporary tables in DDL statements. **TEMP** is equivalent to **TEMPORARY**. + + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- Temporary tables are visible to the current session through the schema starting with **pg\_temp** start. Users should not delete schema started with **pg\_temp** or **pg\_toast\_temp**. + >- If **TEMPORARY** or **TEMP** is not specified when you create a table but its schema is set to that starting with **pg\_temp\_** in the current session, the table will be created as a temporary table. 
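+
+    A minimal sketch, assuming an illustrative table named scratch_tmp, that shows the session-specific schema (its name starts with pg_temp) under which a temporary table is created:
+
+    ```
+    postgres=# CREATE TEMP TABLE scratch_tmp(id INT);
+    postgres=# SELECT n.nspname FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relname = 'scratch_tmp';
+    postgres=# DROP TABLE scratch_tmp;
+    ```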
+ +- **IF NOT EXISTS** + + Sends a notice, but does not throw an error, if a table with the same name exists. + +- **table\_name** + + Specifies the name of the table to be created. + +- **column\_name** + + Specifies the name of a column to be created in the new table. + +- **data\_type** + + Specifies the data type of the column. + +- **compress\_mode** + + Specifies the compression option of the table, which is only available for row-store tables. The option specifies the algorithm preferentially used by table columns. + + Value range: **DELTA**, **PREFIX**, **DICTIONARY**, **NUMSTR**, and **NOCOMPRESS** + +- **COLLATE collation** + + Assigns a collation to the column \(which must be of a collatable data type\). If no collation is specified, the default collation is used. + +- **LIKE source\_table \[ like\_option ... \]** + + Specifies a table from which the new table automatically copies all column names, their data types, and their not-null constraints. + + The new table and the original table are decoupled after creation is complete. Changes to the original table will not be applied to the new table, and it is not possible to include data of the new table in scans of the original table. + + Columns and constraints copied by **LIKE** are not merged with the same name. If the same name is specified explicitly or in another **LIKE** clause, an error is reported. + + - The default expressions are copied from the original table to the new table only if **INCLUDING DEFAULTS** is specified. The default behavior is to exclude default expressions, resulting in the copied columns in the new table having default values null. + - The **CHECK** constraints are copied from the original table to the new table only when **INCLUDING CONSTRAINTS** is specified. Other types of constraints are never copied to the new table. Not-null constraints are always copied to the new table. These rules also apply to column constraints and table constraints. + - Any indexes on the original table will not be created on the new table, unless the **INCLUDING INDEXES** clause is specified. + - **STORAGE** settings for the copied column definitions are copied only if **INCLUDING STORAGE** is specified. The default behavior is to exclude **STORAGE** settings. + - If **INCLUDING COMMENTS** is specified, comments for the copied columns, constraints, and indexes are copied. The default behavior is to exclude comments. + - If **INCLUDING PARTITION** is specified, the partition definitions of the source table are copied to the new table, and the new table no longer uses the **PARTITION BY** clause. The default behavior is to exclude partition definition of the original table. + - If **INCLUDING RELOPTIONS** is specified, the new table will copy the storage parameter \(that is, **WITH** clause\) of the source table. The default behavior is to exclude partition definition of the storage parameter of the original table. + - **INCLUDING ALL** contains the meaning of **INCLUDING DEFAULTS**, **INCLUDING CONSTRAINTS**, **INCLUDING INDEXES**, **INCLUDING STORAGE**, **INCLUDING COMMENTS**, **INCLUDING PARTITION**, and **INCLUDING RELOPTIONS**. 
+ + >![](public_sys-resources/icon-notice.gif) **NOTICE:** + >- If the source table contains a sequence with the **SERIAL**, **BIGSERIAL**, or **SMALLSERIRAL** data type, or a column in the source table is a sequence by default and the sequence is created for this table by using **CREATE SEQUENCE...** **OWNED BY**, these sequences will not be copied to the new table, and another sequence specific to the new table will be created. This is different from earlier versions. To share a sequence between the source table and new table, create a shared sequence \(do not use **OWNED BY**\) and set a column in the source table to this sequence. + >- You are not advised to set a column in the source table to the sequence specific to another table especially when the table is distributed in specific node groups, because doing so may result in **CREATE TABLE ... LIKE** execution failures. In addition, doing so may cause the sequence to become invalid in the source sequence because the sequence will also be deleted from the source table when it is deleted from the table that the sequence is specific to. To share a sequence among multiple tables, you are advised to create a shared sequence for them. + +- **WITH \( \{ storage\_parameter = value \} \[, ... \] \)** + + Specifies an optional storage parameter for a table or an index. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >When using **Numeric** of any precision to define a column, specifies precision **p** and scale **s**. When precision and scale are not specified, the input will be displayed. + + The description of parameters is as follows: + + - FILLFACTOR + + The fill factor of a table is a percentage from 10 to 100. **100** \(complete filling\) is the default value. When a smaller fill factor is specified, **INSERT** operations pack table pages only to the indicated percentage. The remaining space on each page is reserved for updating rows on that page. This gives **UPDATE** a chance to place the updated copy of a row on the same page, which is more efficient than placing it on a different page. For a table whose entries are never updated, setting the fill factor to **100** \(complete filling\) is the best choice, but in heavily updated tables a smaller fill factor would be appropriate. The parameter has no meaning for column–store tables. + + Value range: 10–100 + + - ORIENTATION + + Specifies the storage mode \(row-store, column-store, or ORC\) of table data. This parameter cannot be modified once it is set. + + Value range: + + - **ROW** indicates that table data is stored in rows. + + **ROW** applies to OLTP service and scenarios with a large number of point queries or addition/deletion operations. + + - **COLUMN** indicates that the data is stored in columns. + + **COLUMN** applies to the data warehouse service, which has a large amount of aggregation computing, and involves a few column operations. + + Default value: + + If an ordinary tablespace is specified, the default is **ROW**. + + - COMPRESSION + + Specifies the compression level of table data. It determines the compression ratio and time. Generally, the higher the level of compression, the higher the ratio, the longer the time; and the lower the level of compression, the lower the ratio, the shorter the time. The actual compression ratio depends on the distribution mode of table data loaded. + + Value range: + + The valid values for column-store tables are **YES**, **NO**, **LOW**, **MIDDLE**, and **HIGH**, and the default value is **LOW**. 
+ + - COMPRESSLEVEL + + Specifies the table data compression ratio and duration at the same compression level. This divides a compression level into sublevels, providing more choices for compression ratio and duration. As the value becomes greater, the compression ratio becomes higher and duration longer at the same compression level. + + Value range: 0 to 3. The default value is **0**. + + - MAX\_BATCHROW + + Specifies the maximum number of rows in a storage unit during data loading. The parameter is only valid for column-store tables. + + Value range: 10000 to 60000 + + - PARTIAL\_CLUSTER\_ROWS + + Specifies the number of records to be partially clustered for storage during data loading. The parameter is only valid for column-store tables. + + Value range: 600000 to 2147483647 + + - DELTAROW\_THRESHOLD + + Specifies the upper limit of to-be-imported rows for triggering the data import to a delta table when data of a column-store table is to be imported. This parameter takes effect only if [enable\_delta\_store](parallel-data-import.md#en-us_topic_0237124705_section1035224982816) is set to **on**. The parameter is only valid for column-store tables. + + Value range: from 0 to 9999. The default value is **100**. + + - VERSION + + Specifies the version of ORC storage format. + + Value range: 0.12. ORC 0.12 format is supported currently. More formats will be supported as the development of ORC format. + + Default value: **0.12** + + +- **ON COMMIT \{ PRESERVE ROWS | DELETE ROWS | DROP \}** + + **ON COMMIT** determines what to do when you commit a temporary table creation operation. The three options are as follows. Currently, only **PRESERVE ROWS** and **DELETE ROWS** can be used. + + - **PRESERVE ROWS** \(default\): No special action is taken at the ends of transactions. The temporary table and its table data are unchanged. + - **DELETE ROWS**: All rows in the temporary table will be deleted at the end of each transaction block. + - **DROP**: The temporary table will be dropped at the end of the current transaction block. + +- **COMPRESS | NOCOMPRESS** + + If you specify **COMPRESS** in the **CREATE TABLE** statement, the compression feature is triggered in case of a bulk **INSERT** operation. If this feature is enabled, a scan is performed for all tuple data within the page to generate a dictionary and then the tuple data is compressed and stored. If **NOCOMPRESS** is specified, the table is not compressed. + + Default value: **NOCOMPRESS**, that is, tuple data is not compressed before storage. + +- **TABLESPACE tablespace\_name** + + Specifies the tablespace where the new table is created. If not specified, the default tablespace is used. + +- **CONSTRAINT constraint\_name** + + Specifies the name of a column or table constraint. The optional constraint clauses specify constraints that new or updated rows must satisfy for an insert or update operation to succeed. + + There are two ways to define constraints: + + - A column constraint is defined as part of a column definition, and it is bound to a particular column. + - A table constraint is not bound to a particular column but can apply to more than one column. + +- **NOT NULL** + + The column is not allowed to contain null values. + +- **NULL** + + The column is allowed to contain null values. This is the default setting. + + This clause is only provided for compatibility with non-standard SQL databases. It is not recommended. 
+ +- **CHECK \( expression \)** + + Specifies an expression producing a Boolean result where the insert or update operation of new or updated rows can succeed only when the expression result is **TRUE** or **UNKNOWN**; otherwise, an error is thrown and the database is not altered. + + A check constraint specified as a column constraint should reference only the column's values, while an expression appearing in a table constraint can reference multiple columns. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**<\>NULL** and **!=NULL** are invalid in an expression. Change them to **IS NOT NULL**. + +- **DEFAULT default\_expr** + + Assigns a default data value for a column. The value can be any variable-free expressions. \(Subqueries and cross-references to other columns in the current table are not allowed.\) The data type of the default expression must match the data type of the column. + + The default expression will be used in any insert operation that does not specify a value for the column. If there is no default value for a column, then the default value is null. + +- **UNIQUE index\_parameters** + + **UNIQUE \( column\_name \[, ... \] \) index\_parameters** + + Specifies that a group of one or more columns of a table can contain only unique values. + + For the purpose of a unique constraint, null is not considered equal. + +- **PRIMARY KEY index\_parameters** + + **PRIMARY KEY \( column\_name \[, ... \] \) index\_parameters** + + Specifies that a column or columns of a table can contain only unique \(non-duplicate\) and non-null values. + + Only one primary key can be specified for a table. + +- **DEFERRABLE | NOT DEFERRABLE** + + They determine whether the constraint is deferrable. A constraint that is not deferrable will be checked immediately after every command. Checking of constraints that are deferrable can be postponed until the end of the transaction using the **SET CONSTRAINTS** command. **NOT DEFERRABLE** is the default value. Currently, only **UNIQUE** and **PRIMARY KEY** constraints accept this clause. All the other constraints are not deferrable. + +- **PARTIAL CLUSTER KEY** + + Specifies a partial cluster key for storage. When importing data to a column-store table, you can perform local data sorting by specified columns \(single or multiple\). + +- **INITIALLY IMMEDIATE | INITIALLY DEFERRED** + + If a constraint is deferrable, this clause specifies the default time to check the constraint. + + - If the constraint is **INITIALLY IMMEDIATE** \(default value\), it is checked after each statement. + - If the constraint is **INITIALLY DEFERRED**, it is checked only at the end of the transaction. + + The constraint check time can be altered using the **SET CONSTRAINTS** statement. + +- **USING INDEX TABLESPACE tablespace\_name** + + Allows selection of the tablespace in which the index associated with a **UNIQUE** or **PRIMARY KEY** constraint will be created. If not specified, **default\_tablespace** is consulted, or the default tablespace in the database if **default\_tablespace** is empty. + + +## Example: + +``` +-- Create a simple table. 
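+-- (The second statement below, for warehouse_t2, additionally specifies the DICTIONARY compress_mode for the W_STREET_NAME column.)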
+postgres=# CREATE TABLE tpcds.warehouse_t1 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +postgres=# CREATE TABLE tpcds.warehouse_t2 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) DICTIONARY, + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); +``` + +``` +-- Create a table and set the default value of the W_STATE column to GA. +postgres=# CREATE TABLE tpcds.warehouse_t3 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) DEFAULT 'GA', + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- Create a table and check whether the W_WAREHOUSE_NAME column is unique at the end of its creation. +postgres=# CREATE TABLE tpcds.warehouse_t4 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) UNIQUE DEFERRABLE, + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); +``` + +``` +-- Create a table with its fill factor set to 70%. +postgres=# CREATE TABLE tpcds.warehouse_t5 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + UNIQUE(W_WAREHOUSE_NAME) WITH(fillfactor=70) +); + +-- Alternatively, user the following syntax: +postgres=# CREATE TABLE tpcds.warehouse_t6 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) UNIQUE, + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) WITH(fillfactor=70); + +-- Create a table and specify that its data is not written to WALs. 
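+-- (Writes to an unlogged table are faster, but the table is truncated after a crash or unclean shutdown; see the UNLOGGED parameter above.)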
+postgres=# CREATE UNLOGGED TABLE tpcds.warehouse_t7 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- Create a temporary table. +postgres=# CREATE TEMPORARY TABLE warehouse_t24 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- Create a temporary table in a transaction and specify that this table is deleted when the transaction is committed. +postgres=# CREATE TEMPORARY TABLE warehouse_t25 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) ON COMMIT DELETE ROWS; + +-- Create a table and specify that no error is reported for duplicate tables (if any). +postgres=# CREATE TABLE IF NOT EXISTS tpcds.warehouse_t8 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- Create a general tablespace. +postgres=# CREATE TABLESPACE DS_TABLESPACE1 RELATIVE LOCATION 'tablespace/tablespace_1'; +-- Specify a tablespace when creating a table. +postgres=# CREATE TABLE tpcds.warehouse_t9 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) TABLESPACE DS_TABLESPACE1; + +-- Separately specify the index tablespace for W_WAREHOUSE_NAME when creating the table. +postgres=# CREATE TABLE tpcds.warehouse_t10 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) UNIQUE USING INDEX TABLESPACE DS_TABLESPACE1, + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); +``` + +``` +-- Create a table with a primary key constraint. 
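+-- (The next three statements define the same primary key in three ways: as a column constraint, as an unnamed table constraint, and as a named table constraint.)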
+postgres=# CREATE TABLE tpcds.warehouse_t11 +( + W_WAREHOUSE_SK INTEGER PRIMARY KEY, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- An alternative for the preceding syntax is as follows: +postgres=# CREATE TABLE tpcds.warehouse_t12 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + PRIMARY KEY(W_WAREHOUSE_SK) +); + +-- Or use the following statement to specify the name of the constraint: +postgres=# CREATE TABLE tpcds.warehouse_t13 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + CONSTRAINT W_CSTR_KEY1 PRIMARY KEY(W_WAREHOUSE_SK) +); + +-- Create a table with a compound primary key constraint. +postgres=# CREATE TABLE tpcds.warehouse_t14 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + CONSTRAINT W_CSTR_KEY2 PRIMARY KEY(W_WAREHOUSE_SK, W_WAREHOUSE_ID) +); + +-- Create a column-store table. +postgres=# CREATE TABLE tpcds.warehouse_t15 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) WITH (ORIENTATION = COLUMN); + +-- Create a column-store table using partial clustered storage. +postgres=# CREATE TABLE tpcds.warehouse_t16 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + PARTIAL CLUSTER KEY(W_WAREHOUSE_SK, W_WAREHOUSE_ID) +) WITH (ORIENTATION = COLUMN); + +-- Define a column-store table with compression enabled. 
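+-- (COMPRESSION=HIGH is a storage parameter for column-store tables; the row-store example that follows uses the COMPRESS keyword instead.)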
+postgres=# CREATE TABLE tpcds.warehouse_t17 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) WITH (ORIENTATION = COLUMN, COMPRESSION=HIGH); + +-- Define a table with compression enabled. +postgres=# CREATE TABLE tpcds.warehouse_t18 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +) COMPRESS; + +-- Define a column check constraint. +postgres=# CREATE TABLE tpcds.warehouse_t19 +( + W_WAREHOUSE_SK INTEGER PRIMARY KEY CHECK (W_WAREHOUSE_SK > 0), + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) CHECK (W_WAREHOUSE_NAME IS NOT NULL), + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +postgres=# CREATE TABLE tpcds.warehouse_t20 +( + W_WAREHOUSE_SK INTEGER PRIMARY KEY, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) CHECK (W_WAREHOUSE_NAME IS NOT NULL), + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + CONSTRAINT W_CONSTR_KEY2 CHECK(W_WAREHOUSE_SK > 0 AND W_WAREHOUSE_NAME IS NOT NULL) +); + +-- Define a table. Each row in the table is stored on the database node. +postgres=# CREATE TABLE tpcds.warehouse_t21 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2) +); + +-- Define a hash table. +postgres=# CREATE TABLE tpcds.warehouse_t22 +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + W_WAREHOUSE_NAME VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER , + W_STREET_NUMBER CHAR(10) , + W_STREET_NAME VARCHAR(60) , + W_STREET_TYPE CHAR(15) , + W_SUITE_NUMBER CHAR(10) , + W_CITY VARCHAR(60) , + W_COUNTY VARCHAR(30) , + W_STATE CHAR(2) , + W_ZIP CHAR(10) , + W_COUNTRY VARCHAR(20) , + W_GMT_OFFSET DECIMAL(5,2), + CONSTRAINT W_CONSTR_KEY3 UNIQUE(W_WAREHOUSE_SK) +); + +-- Add a varchar column to the tpcds.warehouse_t19 table. +``` + +``` +postgres=# ALTER TABLE tpcds.warehouse_t19 ADD W_GOODS_CATEGORY varchar(30); + +-- Add a check constraint to the tpcds.warehouse_t19 table. +postgres=# ALTER TABLE tpcds.warehouse_t19 ADD CONSTRAINT W_CONSTR_KEY4 CHECK (W_STATE IS NOT NULL); + +-- Use one statement to alter the types of two existing columns. 
+postgres=# ALTER TABLE tpcds.warehouse_t19 + ALTER COLUMN W_GOODS_CATEGORY TYPE varchar(80), + ALTER COLUMN W_STREET_NAME TYPE varchar(100); + +-- This statement is equivalent to the preceding statement. +postgres=# ALTER TABLE tpcds.warehouse_t19 MODIFY (W_GOODS_CATEGORY varchar(30), W_STREET_NAME varchar(60)); + +-- Add a not-null constraint to an existing column. +postgres=# ALTER TABLE tpcds.warehouse_t19 ALTER COLUMN W_GOODS_CATEGORY SET NOT NULL; + +-- Remove not-null constraints from an existing column. +postgres=# ALTER TABLE tpcds.warehouse_t19 ALTER COLUMN W_GOODS_CATEGORY DROP NOT NULL; + +-- If no partial cluster is specified in a column-store table, add a partial cluster to the table. +postgres=# ALTER TABLE tpcds.warehouse_t17 ADD PARTIAL CLUSTER KEY(W_WAREHOUSE_SK); + +-- View the constraint name and delete the partial cluster column of a column-store table. +postgres=# \d+ tpcds.warehouse_t17 + Table "tpcds.warehouse_t17" + Column | Type | Modifiers | Storage | Stats target | Description +-------------------+-----------------------+-----------+----------+--------------+------------- + w_warehouse_sk | integer | not null | plain | | + w_warehouse_id | character(16) | not null | extended | | + w_warehouse_name | character varying(20) | | extended | | + w_warehouse_sq_ft | integer | | plain | | + w_street_number | character(10) | | extended | | + w_street_name | character varying(60) | | extended | | + w_street_type | character(15) | | extended | | + w_suite_number | character(10) | | extended | | + w_city | character varying(60) | | extended | | + w_county | character varying(30) | | extended | | + w_state | character(2) | | extended | | + w_zip | character(10) | | extended | | + w_country | character varying(20) | | extended | | + w_gmt_offset | numeric(5,2) | | main | | +Partial Cluster : + "warehouse_t17_cluster" PARTIAL CLUSTER KEY (w_warehouse_sk) +Has OIDs: no +Location Nodes: ALL DATANODES +Options: compression=no, version=0.12 +postgres=# ALTER TABLE tpcds.warehouse_t17 DROP CONSTRAINT warehouse_t17_cluster; + +-- Move a table to another tablespace. +postgres=# ALTER TABLE tpcds.warehouse_t19 SET TABLESPACE PG_DEFAULT; +-- Create the joe schema. +postgres=# CREATE SCHEMA joe; + +-- Move a table to another schema. +postgres=# ALTER TABLE tpcds.warehouse_t19 SET SCHEMA joe; + +-- Rename an existing table. +postgres=# ALTER TABLE joe.warehouse_t19 RENAME TO warehouse_t23; + +-- Delete a column from the warehouse_t23 table. +postgres=# ALTER TABLE joe.warehouse_t23 DROP COLUMN W_STREET_NAME; + +-- Delete the tablespace, schema joe, and schema tables warehouse. 
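+-- (The tablespace can be dropped only after the tables stored in it have been dropped.)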
+postgres=# DROP TABLE tpcds.warehouse_t1; +postgres=# DROP TABLE tpcds.warehouse_t2; +postgres=# DROP TABLE tpcds.warehouse_t3; +postgres=# DROP TABLE tpcds.warehouse_t4; +postgres=# DROP TABLE tpcds.warehouse_t5; +postgres=# DROP TABLE tpcds.warehouse_t6; +postgres=# DROP TABLE tpcds.warehouse_t7; +postgres=# DROP TABLE tpcds.warehouse_t8; +postgres=# DROP TABLE tpcds.warehouse_t9; +postgres=# DROP TABLE tpcds.warehouse_t10; +postgres=# DROP TABLE tpcds.warehouse_t11; +postgres=# DROP TABLE tpcds.warehouse_t12; +postgres=# DROP TABLE tpcds.warehouse_t13; +postgres=# DROP TABLE tpcds.warehouse_t14; +postgres=# DROP TABLE tpcds.warehouse_t15; +postgres=# DROP TABLE tpcds.warehouse_t16; +postgres=# DROP TABLE tpcds.warehouse_t17; +postgres=# DROP TABLE tpcds.warehouse_t18; +postgres=# DROP TABLE tpcds.warehouse_t20; +postgres=# DROP TABLE tpcds.warehouse_t21; +postgres=# DROP TABLE tpcds.warehouse_t22; +postgres=# DROP TABLE joe.warehouse_t23; +postgres=# DROP TABLE tpcds.warehouse_t24; +postgres=# DROP TABLE tpcds.warehouse_t25; +postgres=# DROP TABLESPACE DS_TABLESPACE1; +postgres=# DROP SCHEMA IF EXISTS joe CASCADE; +``` + +## Helpful Links + +[ALTER TABLE](alter-table.md), [DROP TABLE](drop-table.md), and [CREATE TABLESPACE](create-tablespace.md) + +## Suggestions + +- UNLOGGED + - The unlogged table and its indexes do not use the WAL log mechanism during data writing. Their write speed is much higher than that of ordinary tables. Therefore, they can be used for storing intermediate result sets of complex queries to improve query performance. + - The unlogged table has no primary/standby mechanism. In case of system faults or abnormal breakpoints, data loss may occur. Therefore, the unlogged table cannot be used to store basic data. + +- TEMPORARY | TEMP + - A temporary table is automatically dropped at the end of a session. + +- LIKE + - The new table automatically inherits all column names, data types, and not-null constraints from this table. The new table is irrelevant to the original table after the creation. + +- LIKE INCLUDING DEFAULTS + - The default expressions are copied from the original table to the new table only if **INCLUDING DEFAULTS** is specified. The default behavior is to exclude default expressions, resulting in the copied columns in the new table having default values null. + +- LIKE INCLUDING CONSTRAINTS + - The **CHECK** constraints are copied from the original table to the new table only when **INCLUDING CONSTRAINTS** is specified. Other types of constraints are never copied to the new table. Not-null constraints are always copied to the new table. These rules also apply to column constraints and table constraints. + +- LIKE INCLUDING INDEXES + - Any indexes on the original table will not be created on the new table, unless the **INCLUDING INDEXES** clause is specified. + +- LIKE INCLUDING STORAGE + - **STORAGE** settings for the copied column definitions are copied only if **INCLUDING STORAGE** is specified. The default behavior is to exclude **STORAGE** settings. + +- LIKE INCLUDING COMMENTS + - If **INCLUDING COMMENTS** is specified, comments for the copied columns, constraints, and indexes are copied. The default behavior is to exclude comments. + +- LIKE INCLUDING PARTITION + - If **INCLUDING PARTITION** is specified, the partition definitions of the source table are copied to the new table, and the new table no longer uses the **PARTITION BY** clause. The default behavior is to exclude partition definition of the original table. 
+ +- LIKE INCLUDING RELOPTIONS + - If **INCLUDING RELOPTIONS** is specified, the new table will copy the storage parameter \(that is, **WITH** clause\) of the source table. The default behavior is to exclude partition definition of the storage parameter of the original table. + +- LIKE INCLUDING ALL + - **INCLUDING ALL** contains the meaning of **INCLUDING DEFAULTS**, **INCLUDING CONSTRAINTS**, **INCLUDING INDEXES**, **INCLUDING STORAGE**, **INCLUDING COMMENTS**, **INCLUDING PARTITION**, and **INCLUDING RELOPTIONS**. + +- ORIENTATION ROW + - Creates a row-store table. Row-store applies to the OLTP service, which has many interactive transactions. An interaction involves many columns in the table. Using row-store can improve the efficiency. + +- ORIENTATION COLUMN + - Creates a column-store table. Column-store applies to the DWS, which has a large amount of aggregation computing, and involves a few column operations. + + diff --git a/content/en/docs/Developerguide/create-tablespace.md b/content/en/docs/Developerguide/create-tablespace.md new file mode 100644 index 000000000..5b712b089 --- /dev/null +++ b/content/en/docs/Developerguide/create-tablespace.md @@ -0,0 +1,131 @@ +# CREATE TABLESPACE + +## Function + +**CREATE TABLESPACE** creates a tablespace in a database. + +## Precautions + +- Only system administrators can create a tablespace. +- Do not run **CREATE TABLESPACE** in a transaction block. +- If executing **CREATE TABLESPACE** fails but the internal directory \(or file\) has been created, the directory \(or file\) will remain. You need to manually clear it before creating the tablespace again. If there are residual files of soft links for the tablespace in the data directory, delete the residual files, and then perform O&M operations. +- **CREATE TABLESPACE** cannot be used for two-phase transactions. If it fails on some nodes, the execution cannot be rolled back. +- For details about how to prepare for creating tablespaces, see the description of parameters below. + +## Syntax + +``` +CREATE TABLESPACE tablespace_name + [ OWNER user_name ] RELATIVE LOCATION 'directory' [ MAXSIZE 'space_size' ] + [with_option_clause]; +``` + +The **with\_option\_clause** syntax for creating a general tablespace is as follows: + +``` +WITH ( {filesystem= { 'general'| "general" | general} | + random_page_cost = { 'value ' | value } | + seq_page_cost = { 'value ' | value }}[,...]) +``` + +## Parameter Description + +- **tablespace\_name** + + Specifies name of a tablespace to be created. + + The tablespace name must be distinct from the name of any existing tablespace in openGauss and cannot start with "pg", which are reserved for system catalog spaces. + + Value range: a string. It must comply with the naming convention. + +- **OWNER user\_name** + + Specifies the name of the user who will own the tablespace. If omitted, the default owner is the current user. + + Only system administrators can create tablespaces, but they can use the **OWNER** clause to assign ownership of tablespaces to non-**Sysadmin** administrators. + + Value range: a string. It must be an existing user. + +- **RELATIVE** + + Relative path. The **LOCATION** directory is relative to the data directory in each database node. + + Directory hierarchy: the relative path of the database node directory **/pg\_location/** + + A relative path contains a maximum of two levels. + +- **LOCATION directory** + + Specifies the directory used for the tablespace. 
The directory must meet the following requirements: + + - The openGauss system user must have the read and write permissions on the directory, and the directory must be empty. If the directory does not exist, the system automatically creates it. + - The directory must be an absolute path, and does not contain special characters, such as dollar sign \($\) and greater-than sign \(\>\). + - The directory cannot be specified under the database data directory. + - The directory must be a local path. + + Value range: a string. It must be a valid directory. + +- **MAXSIZE 'space\_size'** + + Specifies the maximum value of the tablespace in a single database node. + + Value range: a string consisting of a positive integer and unit. The unit can be KB, MB, GB, TB, or PB currently. The unit of parsed value is KB and cannot exceed the range that can be expressed in 64 bits, which is 1 KB to 9007199254740991 KB. + +- **random\_page\_cost** + + Specifies the cost of randomly reading the page overhead. + + Value range: 0 to 1.79769e+308 + + Default value: value of the GUC parameter **random\_page\_cost** + +- **seq\_page\_cost** + + Specifies the cost of reading the page overhead in specified order. + + Value range: 0 to 1.79769e+308 + + Default value: value of GUC parameter **seq\_page\_cost** + + +## Examples + +``` +-- Create a tablespace. +postgres=# CREATE TABLESPACE ds_location1 RELATIVE LOCATION 'tablespace/tablespace_1'; + +-- Create user joe. +postgres=# CREATE ROLE joe IDENTIFIED BY 'Bigdata@123'; + +-- Create user jay. +postgres=# CREATE ROLE jay IDENTIFIED BY 'Bigdata@123'; + +-- Create a tablespace and set its owner to user joe. +postgres=# CREATE TABLESPACE ds_location2 OWNER joe RELATIVE LOCATION 'tablespace/tablespace_1'; + +-- Rename the ds_location1 tablespace to ds_location3. +postgres=# ALTER TABLESPACE ds_location1 RENAME TO ds_location3; + +-- Change the owner of the ds_location2 tablespace. +postgres=# ALTER TABLESPACE ds_location2 OWNER TO jay; + +-- Delete the tablespace. +postgres=# DROP TABLESPACE ds_location2; +postgres=# DROP TABLESPACE ds_location3; + +-- Delete users. +postgres=# DROP ROLE joe; +postgres=# DROP ROLE jay; +``` + +## Helpful Links + +[CREATE DATABASE](create-database.md), [CREATE TABLE](create-table.md), [CREATE INDEX](create-index.md), [DROP TABLESPACE](drop-tablespace.md), and [ALTER TABLESPACE](alter-tablespace.md) + +## Suggestions + +- create tablespace + + You are not advised to create tablespaces in a transaction. + + diff --git a/content/en/docs/Developerguide/create-text-search-configuration.md b/content/en/docs/Developerguide/create-text-search-configuration.md new file mode 100644 index 000000000..50f054e4e --- /dev/null +++ b/content/en/docs/Developerguide/create-text-search-configuration.md @@ -0,0 +1,123 @@ +# CREATE TEXT SEARCH CONFIGURATION + +## Function + +**CREATE TEXT SEARCH CONFIGURATION** creates a text search configuration. A text search configuration specifies a text search parser that can divide a string into tokens, plus dictionaries that can be used to determine which tokens are of interest for searching. + +## Precautions + +- If only the parser is specified, the new text search configuration initially has no mapping from token types to dictionaries, and therefore will ignore all words. Subsequently, **ALTER TEXT SEARCH CONFIGURATION** must be used to create mapping to make the configuration useful. If **COPY** is specified, the parser, mapping and parameters of the text search configuration is copied automatically. 
+- If the schema name is given, the text search configuration will be created in the specified schema. Otherwise, the configuration will be created in the current schema.
+- The user who defines a text search configuration becomes its owner.
+- The **PARSER** and **COPY** options are mutually exclusive, because when an existing configuration is copied, its parser selection is copied too.
+
+## Syntax
+
+```
+CREATE TEXT SEARCH CONFIGURATION name
+    ( PARSER = parser_name | COPY = source_config )
+    [ WITH ( {configuration_option = value} [, ...] )];
+```
+
+## Parameter Description
+
+- **name**
+
+    Specifies the name of the text search configuration to be created. The name can be schema-qualified.
+
+- **parser\_name**
+
+    Specifies the name of the text search parser to use for this configuration.
+
+- **source\_config**
+
+    Specifies the name of an existing text search configuration to copy.
+
+- **configuration\_option**
+
+    Specifies parameters for the text search configuration, particularly for the parser specified by **parser\_name** or contained in **source\_config**.
+
+    Value range: The default and **ngram** parsers are supported. The default parser has no corresponding **configuration\_option**. Table 1 below lists the **configuration\_option** values for **ngram** parsers.
+
+    **Table 1**  Configuration parameters for **ngram** parsers

+    | Parser | Parameter | Description | Value Range |
+    | ------ | --------- | ----------- | ----------- |
+    | ngram | gram_size | Length of word segmentation | Integer, 1 to 4. Default value: **2** |
+    | ngram | punctuation_ignore | Whether to ignore punctuations | **true** (default value): Ignore punctuations. **false**: Do not ignore punctuations. |
+    | ngram | grapsymbol_ignore | Whether to ignore graphical characters | **true**: Ignore graphical characters. **false** (default value): Do not ignore graphical characters. |
+
+ + +## Examples + +``` +-- Create a text search configuration. +postgres=# CREATE TEXT SEARCH CONFIGURATION ngram2 (parser=ngram) WITH (gram_size = 2, grapsymbol_ignore = false); + +-- Create a text search configuration. +postgres=# CREATE TEXT SEARCH CONFIGURATION ngram3 (copy=ngram2) WITH (gram_size = 2, grapsymbol_ignore = false); + +-- Add type mapping. +postgres=# ALTER TEXT SEARCH CONFIGURATION ngram2 ADD MAPPING FOR multisymbol WITH simple; + +-- Create user joe. +postgres=# CREATE USER joe IDENTIFIED BY 'Bigdata@123'; + +-- Change the owner of the text search configuration. +postgres=# ALTER TEXT SEARCH CONFIGURATION ngram2 OWNER TO joe; + +-- Change the schema of the text search configuration. +postgres=# ALTER TEXT SEARCH CONFIGURATION ngram2 SET SCHEMA joe; + +-- Rename the text search configuration. +postgres=# ALTER TEXT SEARCH CONFIGURATION joe.ngram2 RENAME TO ngram_2; + +-- Delete the type mapping. +postgres=# ALTER TEXT SEARCH CONFIGURATION joe.ngram_2 DROP MAPPING IF EXISTS FOR multisymbol; + +-- Delete the text search configuration. +postgres=# DROP TEXT SEARCH CONFIGURATION joe.ngram_2; +postgres=# DROP TEXT SEARCH CONFIGURATION ngram3; + +-- Delete the schema and user joe. +postgres=# DROP SCHEMA IF EXISTS joe CASCADE; +postgres=# DROP ROLE IF EXISTS joe; +``` + +## Helpful Links + +[ALTER TEXT SEARCH CONFIGURATION](alter-text-search-configuration.md) and [DROP TEXT SEARCH CONFIGURATION](drop-text-search-configuration.md) + diff --git a/content/en/docs/Developerguide/create-text-search-dictionary.md b/content/en/docs/Developerguide/create-text-search-dictionary.md new file mode 100644 index 000000000..69c710df7 --- /dev/null +++ b/content/en/docs/Developerguide/create-text-search-dictionary.md @@ -0,0 +1,143 @@ +# CREATE TEXT SEARCH DICTIONARY + +## Function + +**CREATE TEXT SEARCH DICTIONARY** creates a full-text retrieval dictionary. A dictionary is used to identify and process particular words during full-text retrieval. + +Dictionaries are created by using predefined templates \(defined in the [PG\_TS\_TEMPLATE](pg_ts_template.md) system catalog\). Five types of dictionaries can be created, **Simple**, **Ispell**, **Synonym**, **Thesaurus**, and **Snowball**. These dictionaries are used to handle different types of tasks. + +## Precautions + +- A user with the **SYSADMIN** permission can create a dictionary. Then, the user automatically becomes the owner of the dictionary. +- A dictionary cannot be created in **pg\_temp** mode. +- After a dictionary is created or modified, any modification to the customized dictionary definition file will not affect the dictionary in the database. To make such modifications take effect in the dictionary in the database, run the **ALTER** statement to update the definition file of the dictionary. + +## Syntax + +``` +CREATE TEXT SEARCH DICTIONARY name ( + TEMPLATE = template + [, option = value [, ... ]] +); +``` + +## Parameter Description + +- **name** + + Specifies the name of a dictionary to be created. \(If you do not specify a schema name, the dictionary will be created in the current schema.\) + + Value range: a string, which complies with the identifier naming convention. A value can contain a maximum of 63 characters. + +- **template** + + Specifies a template name. + + Value range: templates \(**Simple**, **Synonym**, **Thesaurus**, **Ispell**, and **Snowball**\) defined in the [PG\_TS\_TEMPLATE](pg_ts_template.md) system catalog + +- **option** + + Specifies a parameter name. 
Each type of dictionaries has a template containing their custom parameters. Parameters function in a way irrelevant to their setting sequence. + + - Parameters for a **Simple** dictionary + - **STOPWORDS** + + Specifies the name of a file listing stop words. The default file name extension is .stop. In the file, each line defines a stop word. Dictionaries will ignore blank lines and spaces in the file and convert stop-word phrases into lowercase. + + - **ACCEPT** + + Specifies whether to accept a non-stop word as recognized. Default value: **true** + + If **ACCEPT=true** is set for a **Simple** dictionary, no token will be passed to subsequent dictionaries. In this case, you are advised to place the **Simple** dictionary at the end of the dictionary list. If **ACCEPT=false** is set, you are advised to place the **Simple** dictionary before at least one dictionary in the list. + + - **FILEPATH** + + Specifies the directory for storing dictionary files. The directory can be a local directory or an OBS directory. The local directory format is **file://**_absolute\_path_. The OBS directory format is **obs://**_bucket\_name_**/path accesskey=ak secretkey=sk region=rg**. The default value is the directory where predefined dictionary files are located. If any of the **FILEPATH** and **STOPWORDS** parameters is specified, the other one must also be specified. + + - Parameters for a **Synonym** dictionary + - **SYNONYM** + + Specifies the name of the definition file for a **Synonym** dictionary. The default file name extension is .syn. + + The file is a list of synonyms. Each line is in the format of _token synonym_, that is, token and its synonym separated by a space. + + - **CASESENSITIVE** + + Specifies whether tokens and their synonyms are case sensitive. The default value is **false**, indicating that tokens and synonyms in dictionary files will be converted into lowercase. If this parameter is set to **true**, they will not be converted into lowercase. + + - **FILEPATH** + + Specifies the directory for storing **Synonym** dictionary files. The directory can be a local directory or an OBS directory. The local directory format is **file://**_absolute\_path_. The OBS directory format is **obs://**_bucket\_name_**/path accesskey=ak secretkey=sk region=rg**. The default value is the directory where predefined dictionary files are located. + + - Parameters for a **Thesaurus** dictionary + - **DICTFILE** + + Specifies the name of a dictionary definition file. The default file name extension is .ths. + + The file is a list of synonyms. Each line is in the format of _sample words_**:** _indexed words_. The colon \(:\) is used as a separator between a phrase and its substitute word. If multiple sample words are matched, the TZ selects the longest one. + + - **DICTIONARY** + + Specifies the name of a subdictionary used for word normalization. This parameter is mandatory and only one subdictionary name can be specified. The specified subdictionary must exist. It is used to identify and normalize input text before phrase matching. + + If an input word cannot be recognized by the subdictionary, an error will be reported. In this case, remove the word or update the subdictionary to make the word recognizable. In addition, an asterisk \(\*\) can be placed at the beginning of an indexed word to skip the application of a subdictionary on it, but all sample words must be recognizable by the subdictionary. 
+ + If the sample words defined in the dictionary file contain stop words defined in the subdictionary, use question marks \(?\) to replace them. Assume that **a** and **the** are stop words defined in the subdictionary. + + ``` + ? one ? two : swsw + ``` + + **a one the two** and **the one a two** will be matched and output as **swsw**. + + - **FILEPATH** + + Specifies the directory for storing dictionary definition files. The directory can be a local directory or an OBS directory. The local directory format is **file://**_absolute\_path_. The OBS directory format is **obs://**_bucket\_name_**/path accesskey=ak secretkey=sk region=rg**. The default value is the directory where predefined dictionary files are located. + + - Parameters for an **Ispell** dictionary + - **DICTFILE** + + Specifies the name of a dictionary definition file. The default file name extension is .dict. + + - **AFFFILE** + + Specifies the name of an affix file. The default file name extension is .affix. + + - **STOPWORDS** + + Specifies the name of a file listing stop words. The default file name extension is .stop. The file content format is the same as that of the file for a **Simple** dictionary. + + - **FILEPATH** + + Specifies the directory for storing dictionary files. The directory can be a local directory or an OBS directory. The local directory format is **file://**_absolute\_path_. The OBS directory format is **obs://**_bucket\_name_**/path accesskey=ak secretkey=sk region=rg**. The default value is the directory where predefined dictionary files are located. + + - Parameters for a **Snowball** dictionary + - **LANGUAGE** + + Specifies the name of a language whose stemming algorithm will be used. According to spelling rules in the language, the algorithm normalizes the variants of an input word into a basic word or a stem. + + - **STOPWORDS** + + Specifies the name of a file listing stop words. The default file name extension is .stop. The file content format is the same as that of the file for a **Simple** dictionary. + + - **FILEPATH** + + Specifies the directory for storing dictionary definition files. The directory can be a local directory or an OBS directory. The local directory format is **file://**_absolute\_path_. The OBS directory format is **obs://**_bucket\_name_**/path accesskey=ak secretkey=sk region=rg**. The default value is the directory where predefined dictionary files are located. If any of the **FILEPATH** and **STOPWORDS** parameters is specified, the other one must also be specified. + + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The name of a dictionary definition file can contain only lowercase letters, digits, and underscores \(\_\). + +- **value** + + Specifies a parameter value. If the value is not an identifier or a number, enclose it with single quotation marks \(''\). You can also enclose identifiers and numbers. + + +## Examples + +See examples in [Configuration Examples](configuration-examples.md). + +## Helpful Links + +[ALTER TEXT SEARCH DICTIONARY](alter-text-search-dictionary.md) and [CREATE TEXT SEARCH DICTIONARY](create-text-search-dictionary.md) + diff --git a/content/en/docs/Developerguide/create-trigger.md b/content/en/docs/Developerguide/create-trigger.md new file mode 100644 index 000000000..ef1030dce --- /dev/null +++ b/content/en/docs/Developerguide/create-trigger.md @@ -0,0 +1,352 @@ +# CREATE TRIGGER + +## Function + +**CREATE TRIGGER** creates a trigger. 
The trigger will be associated with the specified table or view, and will execute the specified function when certain operations are performed.
+
+## Precautions
+
+- Currently, triggers can be created only on ordinary row-store tables, instead of on column-store tables, temporary tables, or unlogged tables.
+- If multiple triggers of the same kind are defined for the same event, they will be fired in alphabetical order by name.
+- Triggers are usually used for data association and synchronization between multiple tables. SQL execution performance is greatly affected. Therefore, you are advised not to use this statement when a large amount of data needs to be synchronized and performance requirements are high.
+
+## Syntax
+
+```
+CREATE [ CONSTRAINT ] TRIGGER trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] }
+    ON table_name
+    [ FROM referenced_table_name ]
+    { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }
+    [ FOR [ EACH ] { ROW | STATEMENT } ]
+    [ WHEN ( condition ) ]
+    EXECUTE PROCEDURE function_name ( arguments );
+```
+
+Events include:
+
+```
+    INSERT
+    UPDATE [ OF column_name [, ... ] ]
+    DELETE
+    TRUNCATE
+```
+
+## Parameter Description
+
+- **CONSTRAINT**
+
+    \(Optional\) Creates a constraint trigger. That is, the trigger is used as a constraint. This is the same as a regular trigger except that the timing of the trigger firing can be adjusted using **SET CONSTRAINTS**. Constraint triggers must be **AFTER ROW** triggers.
+
+- **trigger\_name**
+
+    Specifies the name of the trigger to be created. This must be distinct from the name of any other trigger for the same table. The name cannot be schema-qualified because the trigger inherits the schema of its table. For a constraint trigger, this is also the name to use when modifying the trigger's behavior using [SET CONSTRAINTS](set-constraints.md).
+
+    Value range: a string, which complies with the identifier naming convention and contains a maximum of 63 characters.
+
+- **BEFORE**
+
+    Specifies that the function is called before the event.
+
+- **AFTER**
+
+    Specifies that the function is called after the event. A constraint trigger can only be specified as **AFTER**.
+
+- **INSTEAD OF**
+
+    Specifies that the function is called instead of the event.
+
+- **event**
+
+    Specifies the event that will fire the trigger. Values are **INSERT**, **UPDATE**, **DELETE**, and **TRUNCATE**. Multiple events can be specified using **OR**.
+
+    For **UPDATE** events, it is possible to specify a list of columns using this syntax:
+
+    ```
+    UPDATE OF column_name1 [, column_name2 ... ]
+    ```
+
+    The trigger will only fire if at least one of the listed columns is mentioned as a target of the **UPDATE** statement. **INSTEAD OF UPDATE** events do not allow a list of columns.
+
+- **table\_name**
+
+    Specifies the name of the table for which the trigger is created.
+
+    Value range: name of an existing table in the database
+
+- **referenced\_table\_name**
+
+    Specifies the name of another table referenced by the constraint. This option is used for foreign-key constraints and can only be specified for constraint triggers. Because foreign keys are not supported currently, this option is not recommended for general use.
+
+    Value range: name of an existing table in the database
+
+- **DEFERRABLE | NOT DEFERRABLE**
+
+    Specifies the start time of the trigger. These options can only be specified for constraint triggers and determine whether the constraint is deferrable.
+
+    For details, see [CREATE TABLE](create-table.md).
+ +- **INITIALLY IMMEDIATE** **| INITIALLY DEFERRED** + + If the constraint is deferrable, the two clauses specify the default time to check the constraint. It can only be specified for constraint triggers. + + For details, see [CREATE TABLE](create-table.md). + +- **FOR EACH ROW | FOR EACH STATEMENT** + + Specifies the frequency of firing the trigger. + + - **FOR EACH ROW** indicates that the trigger should be fired once for every row affected by the trigger event. + - **FOR EACH STATEMENT** indicates that the trigger should be fired just once per SQL statement. + + If neither is specified, the default is **FOR EACH STATEMENT**. Constraint triggers can only be marked as **FOR EACH ROW**. + +- **condition** + + Specifies whether the trigger function will actually be executed. If **WHEN** is specified, the function will be called only when **condition** returns **true**. + + In **FOR EACH ROW** triggers, the **WHEN** condition can refer to columns of the old and/or new row values by writing **OLD.**_column name_ or **NEW.**_column name_ respectively. In addition, **INSERT** triggers cannot refer to **OLD**, and **DELETE** triggers cannot refer to **NEW**. + + **INSTEAD OF** triggers do not support **WHEN** conditions. + + Currently, **WHEN** expressions cannot contain subqueries. + + Note that for constraint triggers, evaluation of the **WHEN** condition is not deferred, but occurs immediately after the row update operation is performed. If the condition does not evaluate to **true**, then the trigger is not queued for deferred execution. + +- **function\_name** + + Specifies a user-defined function, which must be declared as taking no parameters and returning type trigger. This is executed when a trigger fires. + +- **arguments** + + Specifies an optional comma-separated list of parameters to be provided to the function when the trigger is executed. The parameters are literal string constants. Simple names and numeric constants can also be written here, but they will all be converted to strings. Check the description of the implementation language of the trigger function to find out how these parameters can be accessed within the function. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The following details trigger types: + >- **INSTEAD OF** triggers must be marked as **FOR EACH ROW** and can be defined only on views. + >- **BEFORE** and **AFTER** triggers on a view must be marked as **FOR EACH STATEMENT**. + >- **TRUNCATE** triggers must be marked as **FOR EACH STATEMENT**. + + **Table 1** Types of triggers supported on tables and views + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+    | When | Event | Row-Level | Statement-Level |
+    | ---- | ----- | --------- | --------------- |
+    | BEFORE | INSERT/UPDATE/DELETE | Tables | Tables and views |
+    | BEFORE | TRUNCATE | Not supported. | Tables |
+    | AFTER | INSERT/UPDATE/DELETE | Tables | Tables and views |
+    | AFTER | TRUNCATE | Not supported. | Tables |
+    | INSTEAD OF | INSERT/UPDATE/DELETE | Views | Not supported. |
+    | INSTEAD OF | TRUNCATE | Not supported. | Not supported. |
+
+    **Table 2**  Special variables in PL/pgSQL functions
+
+    | Variable | Description |
+    | -------- | ----------- |
+    | NEW | New tuple for INSERT and UPDATE operations. This variable is NULL for DELETE operations. |
+    | OLD | Old tuple for UPDATE and DELETE operations. This variable is NULL for INSERT operations. |
+    | TG_NAME | Trigger name. |
+    | TG_WHEN | Trigger timing (BEFORE, AFTER, or INSTEAD OF). |
+    | TG_LEVEL | Trigger frequency (ROW or STATEMENT). |
+    | TG_OP | Trigger event (INSERT, UPDATE, DELETE, or TRUNCATE). |
+    | TG_RELID | OID of the table where the trigger resides. |
+    | TG_RELNAME | Name of the table where the trigger resides. (This variable has been replaced by TG_TABLE_NAME.) |
+    | TG_TABLE_NAME | Name of the table where the trigger resides. |
+    | TG_TABLE_SCHEMA | Schema of the table where the trigger resides. |
+    | TG_NARGS | Number of parameters for the trigger function. |
+    | TG_ARGV[] | List of parameters for the trigger function. |
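+    For illustration, the following minimal sketch shows how a single trigger function can use **TG_OP**, **TG_TABLE_NAME**, and the **NEW**/**OLD** tuples to serve several events. The **audit_log** table, **audit_func** function, **inventory** table, and **inventory_audit** trigger names are hypothetical and are not part of the reference examples below.
+
+    ```
+    -- Hypothetical audit table and trigger function; adjust names to your schema.
+    postgres=# CREATE TABLE audit_log(op text, tbl text, fired_at timestamp);
+    postgres=# CREATE OR REPLACE FUNCTION audit_func() RETURNS TRIGGER AS
+        $$
+        BEGIN
+            -- TG_OP holds the firing event; TG_TABLE_NAME holds the table name.
+            INSERT INTO audit_log VALUES(TG_OP, TG_TABLE_NAME, now());
+            IF (TG_OP = 'DELETE') THEN
+                RETURN OLD;   -- OLD is the only tuple available for DELETE.
+            ELSE
+                RETURN NEW;   -- NEW is returned for INSERT and UPDATE.
+            END IF;
+        END
+        $$ LANGUAGE PLPGSQL;
+    postgres=# CREATE TABLE inventory(id int, qty int);
+    postgres=# CREATE TRIGGER inventory_audit
+        AFTER INSERT OR UPDATE OR DELETE ON inventory
+        FOR EACH ROW
+        EXECUTE PROCEDURE audit_func();
+    ```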
+ + +## Examples + +``` +-- Create a source table and a destination table. +postgres=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +postgres=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); + +-- Create a trigger function. +postgres=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3); + RETURN NEW; + END + $$ LANGUAGE PLPGSQL; + +postgres=# CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1; + RETURN OLD; + END + $$ LANGUAGE PLPGSQL; + +postgres=# CREATE OR REPLACE FUNCTION TRI_DELETE_FUNC() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1; + RETURN OLD; + END + $$ LANGUAGE PLPGSQL; + +-- Create an INSERT trigger. +postgres=# CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(); + +-- Create an UPDATE trigger. +postgres=# CREATE TRIGGER update_trigger + AFTER UPDATE ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_update_func(); + +-- Create a DELETE trigger. +postgres=# CREATE TRIGGER delete_trigger + BEFORE DELETE ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_delete_func(); + +-- Execute the INSERT event and check the trigger results. +postgres=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300); +postgres=# SELECT * FROM test_trigger_src_tbl; +postgres=# SELECT * FROM test_trigger_des_tbl; //Check whether the trigger operation takes effect. + +-- Execute the UPDATE event and check the trigger results. +postgres=# UPDATE test_trigger_src_tbl SET id3=400 WHERE id1=100; +postgres=# SELECT * FROM test_trigger_src_tbl; +postgres=# SELECT * FROM test_trigger_des_tbl; //Check whether the trigger operation takes effect. + +-- Execute the DELETE event and check the trigger results. +postgres=# DELETE FROM test_trigger_src_tbl WHERE id1=100; +postgres=# SELECT * FROM test_trigger_src_tbl; +postgres=# SELECT * FROM test_trigger_des_tbl; //Check whether the trigger operation takes effect. + +-- Modify a trigger. +postgres=# ALTER TRIGGER delete_trigger ON test_trigger_src_tbl RENAME TO delete_trigger_renamed; + +-- Disable insert_trigger. +postgres=# ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER insert_trigger; + +-- Disable all triggers on the current table. +postgres=# ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER ALL; + +-- Delete triggers. +postgres=# DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +postgres=# DROP TRIGGER update_trigger ON test_trigger_src_tbl; +postgres=# DROP TRIGGER delete_trigger_renamed ON test_trigger_src_tbl; +``` + +## Helpful Links + +[ALTER TRIGGER](alter-trigger.md), [DROP TRIGGER](drop-trigger.md), and [ALTER TABLE](alter-table.md) + diff --git a/content/en/docs/Developerguide/create-type.md b/content/en/docs/Developerguide/create-type.md new file mode 100644 index 000000000..20aa2790a --- /dev/null +++ b/content/en/docs/Developerguide/create-type.md @@ -0,0 +1,274 @@ +# CREATE TYPE + +## Function + +**CREATE TYPE** registers a new data type for use in the current database. The user who defines a type becomes its owner. Types are designed only for row-store tables. + +The following data types can be created: composite type, base type, shell type, and enumerated type. + +- Composite type + + A composite type is specified by a list of attribute names and data types. 
If the data type of an attribute is collatable, the attribute's collation rule can also be specified. This is essentially the same as the row type of a table, but using **CREATE TYPE** avoids the need to create an actual table when all that is wanted is to define a type. A stand-alone composite type is useful as the parameter or return type of a function. + + To create a composite type, you must have the **USAGE** permission on all of its attribute types. + +- Base type + + You can create a base type \(scalar type\). Generally, these functions must be written in the underlying language. + +- Shell type + + A shell type is simply a placeholder for a type to be defined later; it is created by issuing **CREATE TYPE** with no parameters except for the type name. Shell types are needed as forward references when base types are created. + +- Enumerated type + + An enumerated type is a list of one or more quoted labels, each of which must be 1 to 64 bytes long. + + +## Precautions + +If a schema name is given then the type is created in the specified schema. Otherwise, it is created in the current schema. The type name must be distinct from the name of any existing type or domain in the same schema. \(Because tables have associated data types, the type name must also be distinct from the name of any existing table in the same schema.\) + +## Syntax + +``` +CREATE TYPE name AS + ( [ attribute_name data_type [ COLLATE collation ] [, ... ] ] ) + +CREATE TYPE name ( + INPUT = input_function, + OUTPUT = output_function + [ , RECEIVE = receive_function ] + [ , SEND = send_function ] + [ , TYPMOD_IN = +type_modifier_input_function ] + [ , TYPMOD_OUT = +type_modifier_output_function ] + [ , ANALYZE = analyze_function ] + [ , INTERNALLENGTH = { internallength | +VARIABLE } ] + [ , PASSEDBYVALUE ] + [ , ALIGNMENT = alignment ] + [ , STORAGE = storage ] + [ , LIKE = like_type ] + [ , CATEGORY = category ] + [ , PREFERRED = preferred ] + [ , DEFAULT = default ] + [ , ELEMENT = element ] + [ , DELIMITER = delimiter ] + [ , COLLATABLE = collatable ] +) + +CREATE TYPE name + +CREATE TYPE name AS ENUM + ( [ 'label' [, ... ] ] ) +``` + +## Parameter Description + +Composite type + +- **name** + + Specifies the name \(optionally schema-qualified\) of the type to be created. + +- **attribute\_name** + + Specifies the name of an attribute \(column\) for the composite type. + +- **data\_type** + + Specifies the name of an existing data type to become a column of the composite type. + +- **collation** + + Specifies the name of an existing collation rule to be associated with a column of the composite type. + + +Base type + +When creating a base type, you can place parameters in any order. The **input\_function** and **output\_function** parameters are mandatory, and other parameters are optional. + +- **input\_function** + + Specifies the name of a function that converts data from the type's external textual form to its internal form. + + The input function may be declared as taking one parameter of type cstring or taking three parameters of types cstring, oid, and integer. + + - The first parameter is the input text as a C string, + - the second parameter is the type's own OID \(except for array types, which instead receive their element type's OID\), + - and the third is the typmod of the destination column, if known \(**-1** will be passed if not\). + + The input function must return a value of the data type itself. 
Usually, an input function should be declared **STRICT**; if it is not, it will be called with a **NULL** first parameter when reading a **NULL** input value. The function must still return **NULL** in this case, unless it raises an error. \(This case is mainly meant to support domain input functions, which might need to reject **NULL** inputs.\) + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The input and output functions can be declared to have results or parameters of the new type, when they have to be created before the new type can be created. The type should first be defined as a shell type, which is a placeholder type that has no attributes except a name and an owner. This is done by issuing the **CREATE TYPE **_name_ statement, with no additional parameters. Then the I/O functions can be defined referencing the shell type. Finally, **CREATE TYPE** with a full definition replaces the shell entry with a complete, valid type definition, after which the new type can be used normally. + +- **output\_function** + + Specifies the name of a function that converts data from the type's internal form to its external textual form. + + The output function must be declared as taking one parameter of the new data type. The output function must return type cstring. Output functions are not invoked for **NULL** values. + +- **receive\_function** + + \(Optional\) Specifies the name of a function that converts data from the type's external binary form to its internal form. + + If this function is not supplied, the type cannot participate in binary input. The binary representation should be chosen to be cheap to convert to internal form, while being reasonably portable. \(For example, the standard integer data types use network byte order as the external binary representation, while the internal representation is in the machine's native byte order.\) The receive function should perform adequate checking to ensure that the value is valid. + + The receive function may be declared as taking one parameter of type internal or taking three parameters of types internal, oid, integer. + + - The first parameter is a pointer to a StringInfo buffer holding the received byte string; + - the latter two are the same as for the text input function. + + The receive function must return a value of the data type itself. Usually, a receive function should be declared **STRICT**; if it is not, it will be called with a **NULL** first parameter when reading a **NULL** input value. The function must still return **NULL** in this case, unless it raises an error. \(This case is mainly meant to support domain receive functions, which might need to reject **NULL** inputs.\) + +- **send\_function** + + \(Optional\) Specifies the name of a function that converts data from the type's internal form to its external binary form. + + If this function is not supplied, the type cannot participate in binary output. The send function must be declared as taking one parameter of the new data type. The send function must return type bytea. Send functions are not invoked for **NULL** values. + +- **type\_modifier\_input\_function** + + \(Optional\) Specifies the name of a function that converts an array of modifier\(s\) for the type to an internal form. + +- **type\_modifier\_output\_function** + + \(Optional\) Specifies the name of a function that converts the internal form of the type's modifier\(s\) to its external textual form. 
+ + >![](public_sys-resources/icon-note.gif) **NOTE:** + >**type\_modifier\_input\_function** and **type\_modifier\_output\_function** are needed if the type supports modifiers, that is optional constraints attached to a type declaration, such as char\(5\) or numeric\(30,2\). openGauss allows user-defined types to take one or more simple constants or identifiers as modifiers. However, this information must be capable of being packed into a single non-negative integer value for storage in the system catalogs. The **type\_modifier\_input\_function** is passed the declared modifier\(s\) in the form of a cstring array. It must check the values for validity \(throwing an error if they are wrong\), and if they are correct, return a single non-negative integer value that will be stored as the column "typmod". Type modifiers will be rejected if the type does not have a **type\_modifier\_input\_function**. The **type\_modifier\_output\_function** converts the internal integer typmod value back to the correct form for user display. It must return a cstring value that is the exact string to append to the type name; for example numeric's function might return \(30,2\). It is allowed to omit the **type\_modifier\_output\_function**, in which case the default display format is just the stored typmod integer value enclosed in parentheses. + +- **analyze\_function** + + \(Optional\) Specifies the name of a function that performs statistical analysis for the data type. + + By default, **ANALYZE** will attempt to gather statistics using the type's "equals" and "less-than" operators, if there is a default b-tree operator class for the type. For non-scalar types, this behavior is likely to be unsuitable, so it can be overridden by specifying a custom analysis function. The analysis function must be declared to take one parameter of type internal and return a boolean result. + +- **internallength** + + \(Optional\) Specifies the length in bytes of the new type's internal representation. The default assumption is that it is variable-length. + + While the details of the new type's internal representation are only known to the I/O functions and other functions you create to work with the type, there are several attributes of the internal representation that must be declared to openGauss. **internallength** is the most important one. Base data types can be fixed-length, in which case **internallength** is a positive integer, or variable length, indicated by setting **internallength** to **VARIABLE**. \(Internally, this is represented by setting **typlen** to **-1**.\) The internal representation of all variable-length types must start with a 4-byte integer giving the total length of this value of the type. + +- **PASSEDBYVALUE** + + \(Optional\) Indicates that values of this data type are passed by value, rather than by reference. You cannot pass by value types whose internal representation is larger than the size of the Datum type \(4 bytes on most machines, 8 bytes on a few\). + +- **alignment** + + \(Optional\) Specifies the storage alignment requirement of the data type. If specified, it must be **char**, **int2**, **int4**, or **double**; the default is **int4**. + + The allowed values equate to alignment on 1, 2, 4, or 8 byte boundaries. Note that variable-length types must have an alignment of at least 4, since they necessarily contain an int4 as their first component. + +- **storage** + + \(Optional\) Specifies the storage strategy for the data type. 
+ + If specified, it must be **plain**, **external**, **extended**, or **main**; the default is **plain**. + + - **plain** specifies that data of the type will always be stored in-line and not compressed. \(Only **plain** is allowed for fixed-length types.\) + - **extended** specifies that the system will first try to compress a long data value, and will move the value out of the main table row if it is still too long. + - **external** allows the value to be moved out of the main table, but the system will not try to compress it. + - **main** allows compression, but discourages moving the value out of the main table. \(Data items with this storage strategy might still be moved out of the main table if there is no other way to make a row fit, but they will be kept in the main table preferentially over **extended** and **external** items.\) + + All **storage** values other than **plain** imply that the functions of the data type can handle values that have been toasted. The specific other value given merely determines the default **TOAST** storage strategy for columns of a toastable data type; users can pick other strategies for individual columns using **ALTER TABLE SET STORAGE**. + + +- **like\_type** + + \(Optional\) Specifies the name of an existing data type that the new type will have the same representation as. The values of **internallength**, **passedbyvalue**, **alignment**, and **storage** are copied from that type, unless overridden by explicit specification elsewhere in this **CREATE TYPE** statement. + + Specifying representation in this way is especially useful when the low-level implementation of a new type references an existing type. + +- **category** + + \(Optional\) Specifies the category code \(a single ASCII character\) for this type. The default is **U** for a user-defined type. You may also choose other ASCII characters to create custom categories. + +- **preferred** + + \(Optional\) Specifies whether a type is preferred within its type category. If it is, the value will be **TRUE**, else **FALSE**. The default is **FALSE**. Be very careful about creating a preferred type within an existing type category, as this could cause surprising changes in behavior. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The **category** and **preferred** parameters can be used to help control which implicit cast will be applied in ambiguous situations. Each data type belongs to a category named by a single ASCII character, and each type is either preferred or not within its category. The parser will prefer casting to preferred types \(but only from other types within the same category\) when this rule is helpful in resolving overloaded functions or operators. For types that have no implicit casts to or from any other types, it is sufficient to leave these settings at the defaults. However, for a group of related types that have implicit casts, it is often helpful to mark them all as belonging to a category and select one or two of the most general types as being preferred within the category. The **category** parameter is especially useful when adding a user-defined type to an existing built-in category, such as the numeric or string types. However, it is also possible to create entirely-user-defined type categories. Select any ASCII character other than an uppercase letter to name such a category. + +- **default** + + \(Optional\) Specifies the default value for the data type. If this is omitted, the default is null. 
+
+    A default value can be specified, in case a user wants columns of the data type to default to something other than the null value. Specify the default with the **DEFAULT** keyword. \(Such a default can be overridden by an explicit **DEFAULT** clause attached to a particular column.\)
+
+- **element**
+
+    \(Optional\) Specifies the type of array elements when an array type is created. For example, to define an array of 4-byte integers \(int4\), specify **ELEMENT = int4**.
+
+- **delimiter**
+
+    \(Optional\) Specifies the delimiter character to be used between values in arrays made of this type.
+
+    **delimiter** can be set to a specific character. The default delimiter is the comma \(,\). Note that the delimiter is associated with the array element type, not the array type itself.
+
+- **collatable**
+
+    \(Optional\) Specifies whether this type's operations can use collation information. If they can, the value will be **TRUE**, else **FALSE** \(default\).
+
+    If **collatable** is **TRUE**, column definitions and expressions of the type may carry collation information through use of the **COLLATE** clause. It is up to the implementations of the functions operating on the type to actually use the collation information; this does not happen automatically merely by marking the type collatable.
+
+- **label**
+
+    \(Optional\) Represents the textual label associated with one value of an enumerated type. It is a string of 1 to 64 characters.
+
+
+>![](public_sys-resources/icon-note.gif) **NOTE:**
+>Whenever a user-defined type is created, openGauss automatically creates an associated array type whose name consists of the element type's name prepended with an underscore \(\_\).
+
+## Examples
+
+```
+-- Create a composite type, create a table, insert data, and make a query.
+postgres=# CREATE TYPE compfoo AS (f1 int, f2 text);
+postgres=# CREATE TABLE t1_compfoo(a int, b compfoo);
+postgres=# CREATE TABLE t2_compfoo(a int, b compfoo);
+postgres=# INSERT INTO t1_compfoo values(1,(1,'demo'));
+postgres=# INSERT INTO t2_compfoo select * from t1_compfoo;
+postgres=# SELECT (b).f1 FROM t1_compfoo;
+postgres=# SELECT * FROM t1_compfoo t1 join t2_compfoo t2 on (t1.b).f1=(t2.b).f1;
+
+-- Rename the data type.
+postgres=# ALTER TYPE compfoo RENAME TO compfoo1;
+
+-- Change the owner of the user-defined type compfoo1 to usr1.
+postgres=# CREATE USER usr1 PASSWORD 'Bigdata@123';
+postgres=# ALTER TYPE compfoo1 OWNER TO usr1;
+
+-- Change the schema of the user-defined type compfoo1 to usr1.
+postgres=# ALTER TYPE compfoo1 SET SCHEMA usr1;
+
+-- Add a new attribute to the data type.
+postgres=# ALTER TYPE usr1.compfoo1 ADD ATTRIBUTE f3 int;
+
+-- Delete the compfoo1 type.
+postgres=# DROP TYPE usr1.compfoo1 cascade;
+
+-- Delete related tables and users.
+postgres=# DROP TABLE t1_compfoo;
+postgres=# DROP TABLE t2_compfoo;
+postgres=# DROP SCHEMA usr1;
+postgres=# DROP USER usr1;
+
+-- Create an enumerated type.
+postgres=# CREATE TYPE bugstatus AS ENUM ('create', 'modify', 'closed');
+
+-- Add a label.
+postgres=# ALTER TYPE bugstatus ADD VALUE IF NOT EXISTS 'regress' BEFORE 'closed';
+
+-- Rename the label.
+postgres=# ALTER TYPE bugstatus RENAME VALUE 'create' TO 'new'; +``` + +## Helpful Links + +[ALTER TYPE](alter-type.md) and [DROP TYPE](drop-type.md) + diff --git a/content/en/docs/Developerguide/create-user.md b/content/en/docs/Developerguide/create-user.md new file mode 100644 index 000000000..3100305b0 --- /dev/null +++ b/content/en/docs/Developerguide/create-user.md @@ -0,0 +1,114 @@ +# CREATE USER + +## Function + +**CREATE USER** creates a user. + +## Precautions + +- A user created using the **CREATE USER** statement has the **LOGIN** permission by default. +- A schema named after the user is automatically created in the database where the statement is executed, but not in other databases. You can run the **CREATE SCHEMA** statement to create such a schema for the user in other databases. +- The owner of an object created by a system administrator in a schema with the same name as a common user is the common user, not the system administrator. + +## Syntax + +``` +CREATE USER user_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE }; +``` + +The **option** clause is used to configure information, including permissions and properties. + +``` +{SYSADMIN | NOSYSADMIN} + | {MONADMIN | NOMONADMIN} + | {OPRADMIN | NOOPRADMIN} + | {POLADMIN | NOPOLADMIN} + | {AUDITADMIN | NOAUDITADMIN} + | {CREATEDB | NOCREATEDB} + | {USEFT | NOUSEFT} + | {CREATEROLE | NOCREATEROLE} + | {INHERIT | NOINHERIT} + | {LOGIN | NOLOGIN} + | {REPLICATION | NOREPLICATION} + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | CONNECTION LIMIT connlimit + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | PERM SPACE 'spacelimit' + | TEMP SPACE 'tmpspacelimit' + | SPILL SPACE 'spillspacelimit' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN role_name [, ...] + | USER role_name [, ...] + | SYSID uid + | DEFAULT TABLESPACE tablespace_name + | PROFILE DEFAULT + | PROFILE profile_name + | PGUSER +``` + +## Parameter Description + +- **user\_name** + + Username + + Value range: a string. It must comply with the naming convention. It can contain a maximum of 63 characters. + +- **password** + + Specifies the login password. + + The new password must: + + - Contain at least eight characters. This is the default length. + - Differ from the username or the username spelled backward. + - Contain at least three types of the following four types of characters: uppercase characters \(A to Z\), lowercase characters \(a to z\), digits \(0 to 9\), and special characters, including: \~!@\#$%^&\*\(\)-\_=+\\|\[\{\}\];:,<.\>/? + - Be enclosed by single or double quotation marks. + + Value range: a string + + +For other parameters, see [CREATE ROLE](create-role.md). + +## Example + +``` +-- Create user jim whose login password is Bigdata@123: +postgres=# CREATE USER jim PASSWORD 'Bigdata@123'; + +-- Alternatively, you can run the following statement: +postgres=# CREATE USER kim IDENTIFIED BY 'Bigdata@123'; + +-- To create a user with the CREATEDB permission, add the CREATEDB keyword. +postgres=# CREATE USER dim CREATEDB PASSWORD 'Bigdata@123'; + +-- Change user jim's login password from Bigdata123@ to Abcd@123: +postgres=# ALTER USER jim IDENTIFIED BY 'Abcd@123' REPLACE 'Bigdata@123'; + +-- Add the CREATEROLE permission to jim. +postgres=# ALTER USER jim CREATEROLE; + +-- Set enable_seqscan to on. (The setting will take effect in the next session.) 
+postgres=# ALTER USER jim SET enable_seqscan TO on; + +-- Reset the enable_seqscan parameter for jim. +postgres=# ALTER USER jim RESET enable_seqscan; + +-- Lock jim. +postgres=# ALTER USER jim ACCOUNT LOCK; + +-- Delete users. +postgres=# DROP USER kim CASCADE; +postgres=# DROP USER jim CASCADE; +postgres=# DROP USER dim CASCADE; +``` + +## Helpful Links + +[ALTER USER](alter-user.md), [CREATE ROLE](create-role.md), and [DROP USER](drop-user.md) + diff --git a/content/en/docs/Developerguide/create-view.md b/content/en/docs/Developerguide/create-view.md new file mode 100644 index 000000000..468ecbea5 --- /dev/null +++ b/content/en/docs/Developerguide/create-view.md @@ -0,0 +1,74 @@ +# CREATE VIEW + +## Function + +**CREATE VIEW** creates a view. A view is a virtual table, not a base table. Only view definition is stored in the database and view data is not. The data is stored in a base table. If data in the base table changes, the data in the view changes accordingly. In this sense, a view is like a window through which users can know their interested data and data changes in the database. + +## Precautions + +None + +## Syntax + +``` +CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW view_name [ ( column_name [, ...] ) ] + [ WITH ( {view_option_name [= view_option_value]} [, ... ] ) ] + AS query; +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>You can use **WITH \(security\_barriers\)** to create a relatively secure view. This prevents attackers from printing hidden base table data by using the **RAISE** statement of low-cost functions. + +## Parameter Description + +- **OR REPLACE** + + Redefines the view if it already exists. + +- **TEMP | TEMPORARY** + + Creates a temporary view. + +- **view\_name** + + Specifies the name \(optionally schema-qualified\) of the view to be created. + + Value range: a string. It must comply with the naming convention. + +- **column\_name** + + Specifies an optional list of names to be used for columns of the view. If not given, the column names are deduced from the query. + + Value range: a string. It must comply with the naming convention. + +- **view\_option\_name \[= view\_option\_value\]** + + Specifies an optional parameter for a view. + + Currently, **view\_option\_name** supports only the **security\_barrier** parameter. This parameter is used when the view attempts to provide row-level security. + + Value range: Boolean type, **TRUE**, and **FALSE**. + +- **query** + + Specifies a **SELECT** or **VALUES** statement that will provide the columns and rows of the view. + + +## Examples + +``` +-- Create a view consisting of columns whose spcname is pg_default. +postgres=# CREATE VIEW myView AS + SELECT * FROM pg_tablespace WHERE spcname = 'pg_default'; + +-- Query the view. +postgres=# SELECT * FROM myView ; + +-- Delete the view. +postgres=# DROP VIEW myView; +``` + +## Helpful Links + +[ALTER VIEW](alter-view.md) and [DROP VIEW](drop-view.md) + diff --git a/content/en/docs/Developerguide/creating-an-index-30.md b/content/en/docs/Developerguide/creating-an-index-30.md new file mode 100644 index 000000000..c496d7891 --- /dev/null +++ b/content/en/docs/Developerguide/creating-an-index-30.md @@ -0,0 +1,59 @@ +# Creating an Index + +You can create a **GIN** index to speed up text searches: + +``` +postgres=# CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body)); +``` + +The **to\_tsvector** function comes in to two versions: the 1-argument version and the 2-argument version. 
When the 1-argument version is used, the system uses the configuration specified by **default\_text\_search\_config** by default. + +Notice that the 2-argument version of **to\_tsvector** is used for index creation. Only text search functions that specify a configuration name can be used in expression indexes. This is because the index contents must be unaffected by **default\_text\_search\_config**, whose value can be changed at any time. If they were affected, the index contents might be inconsistent, because different entries could contain **tsvectors** that were created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly. + +Because the two-argument version of **to\_tsvector** was used in the index above, only a query reference that uses the 2-argument version of **to\_tsvector** with the same configuration name will use that index. That is, **WHERE to\_tsvector\('english', body\) @@ 'a & b'** can use the index, but **WHERE to\_tsvector\(body\) @@ 'a & b'** cannot. This ensures that an index will be used only with the same configuration used to create the index entries. + +It is possible to set up more complex expression indexes wherein the configuration name is specified by another column. For example: + +``` +postgres=# CREATE INDEX pgweb_idx_2 ON tsearch.pgweb USING gin(to_tsvector('ngram', body)); +``` + +where **body** is a column in the **pgweb** table. This allows mixed configurations in the same index while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, for example, **WHERE to\_tsvector\(config\_name, body\) @@ 'a & b'** must match **to\_tsvector** in the index. + +Indexes can even concatenate columns: + +``` +postgres=# CREATE INDEX pgweb_idx_3 ON tsearch.pgweb USING gin(to_tsvector('english', title || ' ' || body)); +``` + +Another approach is to create a separate **tsvector** column to hold the output of **to\_tsvector**. This example is a concatenation of **title** and **body**, using **coalesce** to ensure that one column will still be indexed when the other is **NULL**: + +``` +postgres=# ALTER TABLE tsearch.pgweb ADD COLUMN textsearchable_index_col tsvector; +postgres=# UPDATE tsearch.pgweb SET textsearchable_index_col = to_tsvector('english', coalesce(title,'') || ' ' || coalesce(body,'')); +``` + +Then, create a GIN index to speed up the search: + +``` +postgres=# CREATE INDEX textsearch_idx_4 ON tsearch.pgweb USING gin(textsearchable_index_col); +``` + +Now you are ready to perform a fast full text search: + +``` +postgres=# SELECT title +FROM tsearch.pgweb +WHERE textsearchable_index_col @@ to_tsquery('north & america') +ORDER BY last_mod_date DESC +LIMIT 10; + + title +-------- + Canada + Mexico +(2 rows) +``` + +One advantage of the separate-column approach over an expression index is that it is unnecessary to explicitly specify the text search configuration in queries in order to use the index. As shown in the preceding example, the query can depend on **default\_text\_search\_config**. Another advantage is that searches will be faster, since it will not be necessary to redo the **to\_tsvector** calls to verify index matches. 
The expression-index approach is simpler to set up, however, and it requires less disk space since the **tsvector** representation is not stored explicitly. + diff --git a/content/en/docs/Developerguide/creating-an-index.md b/content/en/docs/Developerguide/creating-an-index.md new file mode 100644 index 000000000..61628f46e --- /dev/null +++ b/content/en/docs/Developerguide/creating-an-index.md @@ -0,0 +1,59 @@ +# Creating an Index + +You can create a **GIN** index to speed up text searches: + +``` +postgres=# CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body)); +``` + +The **to\_tsvector** function comes in to two versions: the 1-argument version and the 2-argument version. When the 1-argument version is used, the system uses the configuration specified by **default\_text\_search\_config** by default. + +Notice that the 2-argument version of **to\_tsvector** is used for index creation. Only text search functions that specify a configuration name can be used in expression indexes. This is because the index contents must be unaffected by **default\_text\_search\_config**, whose value can be changed at any time. If they were affected, the index contents might be inconsistent, because different entries could contain **tsvectors** that were created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly. + +Because the two-argument version of **to\_tsvector** was used in the index above, only a query reference that uses the 2-argument version of **to\_tsvector** with the same configuration name will use that index. That is, **WHERE to\_tsvector\('english', body\) @@ 'a & b'** can use the index, but **WHERE to\_tsvector\(body\) @@ 'a & b'** cannot. This ensures that an index will be used only with the same configuration used to create the index entries. + +It is possible to set up more complex expression indexes wherein the configuration name is specified by another column. For example: + +``` +postgres=# CREATE INDEX pgweb_idx_2 ON tsearch.pgweb USING gin(to_tsvector('ngram', body)); +``` + +where **body** is a column in the **pgweb** table. This allows mixed configurations in the same index while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, for example, **WHERE to\_tsvector\(config\_name, body\) @@ 'a & b'** must match **to\_tsvector** in the index. + +Indexes can even concatenate columns: + +``` +postgres=# CREATE INDEX pgweb_idx_3 ON tsearch.pgweb USING gin(to_tsvector('english', title || ' ' || body)); +``` + +Another approach is to create a separate **tsvector** column to hold the output of **to\_tsvector**. 
This example is a concatenation of **title** and **body**, using **coalesce** to ensure that one column will still be indexed when the other is **NULL**: + +``` +postgres=# ALTER TABLE tsearch.pgweb ADD COLUMN textsearchable_index_col tsvector; +postgres=# UPDATE tsearch.pgweb SET textsearchable_index_col = to_tsvector('english', coalesce(title,'') || ' ' || coalesce(body,'')); +``` + +Then, create a GIN index to speed up the search: + +``` +postgres=# CREATE INDEX textsearch_idx_4 ON tsearch.pgweb USING gin(textsearchable_index_col); +``` + +Now you are ready to perform a fast full text search: + +``` +postgres=# SELECT title +FROM tsearch.pgweb +WHERE textsearchable_index_col @@ to_tsquery('north & america') +ORDER BY last_mod_date DESC +LIMIT 10; + + title +-------- + Canada + Mexico +(2 rows) +``` + +One advantage of the separate-column approach over an expression index is that it is unnecessary to explicitly specify the text search configuration in queries in order to use the index. As shown in the preceding example, the query can depend on **default\_text\_search\_config**. Another advantage is that searches will be faster, since it will not be necessary to redo the **to\_tsvector** calls to verify index matches. The expression-index approach is simpler to set up, however, and it requires less disk space since the **tsvector** representation is not stored explicitly. + diff --git a/content/en/docs/Developerguide/creating-and-managing-databases.md b/content/en/docs/Developerguide/creating-and-managing-databases.md new file mode 100644 index 000000000..50129439f --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-databases.md @@ -0,0 +1,99 @@ +# Creating and Managing Databases + +## Prerequisites + +Only the database system administrator or users granted with database creation permissions can create a database. For details about how to grant database creation permissions to a user, see [Managing Users and Their Permissions](managing-users-and-their-permissions.md). + +## Background + +- openGauss has two default template databases **template0** and **template1** and a default user database **postgres**. +- **CREATE DATABASE** creates a database by copying a template database \(**template1** by default\). Do not use a client or any other tools to connect to or to perform operations on the template databases. +- A maximum of 128 databases can be created in openGauss. +- A database system consists of multiple databases. A client can connect to only one database at a time. Users cannot query data across databases. If an openGauss cluster contains multiple databases, set the **-d** parameter to specify the database to connect to. + +## Precautions + +Assume that the database encoding is SQL\_ASCII. \(You can run the **show server\_encoding** command to query the encoding used for storing data in the current database.\) If the database object name contains multi-byte characters \(such as Chinese\) or if the object name length exceeds the allowed maximum \(63 bytes\), the database truncates the last byte \(not the last character\) of the object name. In this case, half characters may appear. + +To resolve this problem, you need to: + +- Ensure that the name of the data object does not exceed the maximum length. +- Use a proper coded character set, such as UTF-8, as the default database storage code set \(**server\_encoding**\). +- Exclude multi-byte characters from object names. +- Ensure that no more than 128 databases are created. 
+- If you fail to delete an object by specifying its name after truncation, specify its original name to delete it, or manually delete it from the system catalogs on each node. + +## Procedure + +1. Create a database. + + 1. Run the following command to create a tablespace named **tpcds\_local**: + + ``` + postgres=# CREATE TABLESPACE tpcds_local RELATIVE LOCATION 'tablespace/tablespace_1' ; + CREATE TABLESPACE + ``` + + 2. Run the following command to create a database named **db\_tpcc**: + + ``` + postgres=# CREATE DATABASE db_tpcc WITH TABLESPACE = tpcds_local; + CREATE DATABASE + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- Database names must comply with the general naming convention rules of SQL identifiers. The current role automatically becomes the owner of this new database. + >- If a database system is used to support independent users and projects, store them in different databases. + >- If the projects or users are associated with each other and share resources, store them in one database. However, you can divide them into different schemas. A schema is a logical structure, and the access permission for a schema is controlled by the permission system module. + >- A database name contains a maximum of 63 bytes and the excessive bytes at the end of the name will be truncated by the server. You are advised to specify a database name no longer than 63 bytes when you create a database. + +2. View databases. + - Run the **\\l** meta-command to view the database list of the database system. + + ``` + postgres=# \l + ``` + + - Run the following command to query the database list in the **pg\_database** system catalog: + + ``` + postgres=# SELECT datname FROM pg_database; + ``` + +3. Modify the database. + + You can modify database configuration such as the database owner, name, and default settings. + + - Run the following command to set the default search path for the database: + + ``` + postgres=# ALTER DATABASE db_tpcc SET search_path TO pa_catalog,public; + ALTER DATABASE + ``` + + - Run the following command to modify the database tablespaces: + + ``` + postgres=# ALTER DATABASE db_tpcc SET TABLESPACE tpcds; + ALTER DATABASE + ``` + + - Run the following command to rename the database: + + ``` + postgres=# ALTER DATABASE db_tpcc RENAME TO human_tpcds; + ALTER DATABASE + ``` + +4. Delete the database. + + You can run the **[DROP DATABASE](drop-database.md)** command to delete a database. This command deletes the system directory in the database, as well as the database directory on the disk that stores data. Only the database owner or system administrator can delete a database. A database accessed by users cannot be deleted. You need to connect to another database before deleting this database. + + Run the following command to delete the database: + + ``` + postgres=# DROP DATABASE human_tpcds; + DROP DATABASE + ``` + + diff --git a/content/en/docs/Developerguide/creating-and-managing-indexes.md b/content/en/docs/Developerguide/creating-and-managing-indexes.md new file mode 100644 index 000000000..e81366c18 --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-indexes.md @@ -0,0 +1,238 @@ +# Creating and Managing Indexes + +## Background + +Indexes accelerate data access but increase the processing time of insertion, update, and deletion operations. Therefore, before creating an index, consider whether it is necessary and determine the columns where the index will be created. 
You can determine whether to add an index for a table by analyzing the service processing and data use of applications, as well as columns that are frequently used as search criteria or need to be collated. + +Indexes are created based on columns in database tables. Therefore, you must correctly identify which columns require indexes. You are advised to create indexes for any of the following columns: + +- Columns that are often searched and queried. This speeds up searches. +- Columns that function as primary keys. This enforces the uniqueness of the columns and the data collation structures in organized tables. +- Columns that are often searched by range. The index helps collate data, and therefore the specified ranges are contiguous. +- Columns that often need to be collated. The index helps collate data, reducing the time for a collation query. +- Columns where the **WHERE** clause is executed frequently. This speeds up condition judgment. +- Columns that often appear after the keywords **ORDER BY**, **GROUP BY**, and **DISTINCT**. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >- After an index is created, the system automatically determines when to reference it. If the system determines that indexing is faster than sequenced scanning, the index will be used. + >- After an index is successfully created, it must be synchronized with the associated table to ensure new data can be accurately located, which increases the data operation load. Therefore, delete unnecessary indexes periodically. + + +## Procedure + +For details about how to create a partitioned table, see [Creating and Managing Partitioned Tables](creating-and-managing-partitioned-tables.md). + +- Creating an index + - Create the partitioned table index **tpcds\_web\_returns\_p2\_index1** without specifying the partition name. + + ``` + postgres=# CREATE INDEX tpcds_web_returns_p2_index1 ON tpcds.web_returns_p2 (ca_address_id) LOCAL; + ``` + + If the following information is displayed, the table has been created: + + ``` + CREATE INDEX + ``` + + - Create the partitioned table index **tpcds\_web\_returns\_p2\_index2** with the partition name specified. + + ``` + postgres=# CREATE INDEX tpcds_web_returns_p2_index2 ON tpcds.web_returns_p2 (ca_address_sk) LOCAL + ( + PARTITION web_returns_p2_P1_index, + PARTITION web_returns_p2_P2_index TABLESPACE example3, + PARTITION web_returns_p2_P3_index TABLESPACE example4, + PARTITION web_returns_p2_P4_index, + PARTITION web_returns_p2_P5_index, + PARTITION web_returns_p2_P6_index, + PARTITION web_returns_p2_P7_index, + PARTITION web_returns_p2_P8_index + ) TABLESPACE example2; + ``` + + If the following information is displayed, the table has been created: + + ``` + CREATE INDEX + ``` + + +- Modifying the tablespace of an index partition + - Change the tablespace of index partition **web\_returns\_p2\_P2\_index** to **example1**. + + ``` + postgres=# ALTER INDEX tpcds.tpcds_web_returns_p2_index2 MOVE PARTITION web_returns_p2_P2_index TABLESPACE example1; + ``` + + If the following information is displayed, the tablespace of the index partition has been modified: + + ``` + ALTER INDEX + ``` + + - Change the tablespace of index partition **web\_returns\_p2\_P3\_index** to **example2**. 
+ + ``` + postgres=# ALTER INDEX tpcds.tpcds_web_returns_p2_index2 MOVE PARTITION web_returns_p2_P3_index TABLESPACE example2; + ``` + + If the following information is displayed, the tablespace of the index partition has been modified: + + ``` + ALTER INDEX + ``` + + +- Renaming an index partition + + Rename the name of index partition **web\_returns\_p2\_P8\_index** to **web\_returns\_p2\_P8\_index\_new**. + + ``` + postgres=# ALTER INDEX tpcds.tpcds_web_returns_p2_index2 RENAME PARTITION web_returns_p2_P8_index TO web_returns_p2_P8_index_new; + ``` + + If the following information is displayed, the index partition has been renamed: + + ``` + ALTER INDEX + ``` + +- Querying indexes + - Run the following command to query all indexes defined by the system and users: + + ``` + postgres=# SELECT RELNAME FROM PG_CLASS WHERE RELKIND='i'; + ``` + + - Run the following command to query information about a specified index: + + ``` + postgres=# \di+ tpcds.tpcds_web_returns_p2_index2 + ``` + + +- Dropping an index + + ``` + postgres=# DROP INDEX tpcds.tpcds_web_returns_p2_index1; + postgres=# DROP INDEX tpcds.tpcds_web_returns_p2_index2; + ``` + + If the following information is displayed, the tables have been deleted: + + ``` + DROP INDEX + ``` + + +openGauss supports four methods for creating indexes. For details, see [Table 1](#en-us_topic_0237120308_en-us_topic_0059777978_tf6189b97ccf4474e871a7a982b53c2e4). + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- After an index is created, the system automatically determines when to reference it. If the system determines that indexing is faster than sequenced scanning, the index will be used. +>- After an index is successfully created, it must be synchronized with the associated table to ensure new data can be accurately located, which increases the data operation load. Therefore, delete unnecessary indexes periodically. + +**Table 1** Indexing methods + + + + + + + + + + + + + + + + + + + +

+| Indexing Method | Description |
+| --------------- | ----------- |
+| Unique index | An index that requires the uniqueness of an index attribute or an attribute group. If a table declares unique constraints or primary keys, openGauss automatically creates unique indexes (or composite indexes) for columns that form the primary keys or unique constraints. Currently, unique indexes can be created only for B-tree in openGauss. |
+| Composite index | An index that can be defined for multiple attributes of a table. Currently, composite indexes can be created only for B-tree in openGauss and up to 32 columns can share a composite index. |
+| Partial index | An index that can be created for subsets of a table. This indexing method contains only tuples that meet condition expressions. |
+| Expression index | An index that is built on a function or expression calculated based on one or more attributes of a table. An expression index works only when the queried expression is the same as the created expression. |
+ +- Create an ordinary table. + + ``` + postgres=# CREATE TABLE tpcds.customer_address_bak AS TABLE tpcds.customer_address; + INSERT 0 0 + ``` + + +- Create an ordinary index. + + For the **tpcds.customer\_address\_bak** table, you need to perform the following operations frequently: + + ``` + postgres=# SELECT ca_address_sk FROM tpcds.customer_address_bak WHERE ca_address_sk=14888; + ``` + + Generally, the database system needs to scan the **tpcds.customer\_address\_bak** table row by row to find all matched tuples. If the size of the **tpcds.customer\_address\_bak** table is large but only a few \(possibly zero or one\) of the **WHERE** conditions are met, the performance of this sequential scan is low. If the database system uses an index to maintain the **ca\_address\_sk** attribute, the database system only needs to search a few tree layers for the matched tuples. This greatly improves data query performance. Furthermore, indexes can improve the update and deletion operation performance in the database. + + Run the following command to create an index: + + ``` + postgres=# CREATE INDEX index_wr_returned_date_sk ON tpcds.customer_address_bak (ca_address_sk); + CREATE INDEX + ``` + +- Create a multi-column index. + + Assume you need to frequently query records with **ca\_address\_sk** being **5050** and **ca\_street\_number** smaller than **1000** in the **tpcds.customer\_address\_bak** table. Run the following commands: + + ``` + postgres=# SELECT ca_address_sk,ca_address_id FROM tpcds.customer_address_bak WHERE ca_address_sk = 5050 AND ca_street_number < 1000; + ``` + + Run the following command to define a composite index on **ca\_address\_sk** and **ca\_street\_number** columns: + + ``` + postgres=# CREATE INDEX more_column_index ON tpcds.customer_address_bak(ca_address_sk ,ca_street_number ); + CREATE INDEX + ``` + +- Create a partial index. + + If you only want to find records whose **ca\_address\_sk** is **5050**, you can create a partial index to facilitate your query. + + ``` + postgres=# CREATE INDEX part_index ON tpcds.customer_address_bak(ca_address_sk) WHERE ca_address_sk = 5050; + CREATE INDEX + ``` + +- Create an expression index. + + Assume that you need to frequently query records with **ca\_street\_number** smaller than **1000**, run the following command: + + ``` + postgres=# SELECT * FROM tpcds.customer_address_bak WHERE trunc(ca_street_number) < 1000; + ``` + + The following expression index can be created for this query task: + + ``` + postgres=# CREATE INDEX para_index ON tpcds.customer_address_bak (trunc(ca_street_number)); + CREATE INDEX + ``` + +- Delete the **tpcds.customer\_address\_bak** table. + + ``` + postgres=# DROP TABLE tpcds.customer_address_bak; + DROP TABLE + ``` + + diff --git a/content/en/docs/Developerguide/creating-and-managing-partitioned-tables.md b/content/en/docs/Developerguide/creating-and-managing-partitioned-tables.md new file mode 100644 index 000000000..d7fb9752a --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-partitioned-tables.md @@ -0,0 +1,200 @@ +# Creating and Managing Partitioned Tables + +## Background + +openGauss supports range partitioned tables. + +In a range partitioned table, data within a certain range is mapped to each partition. The range is determined by the partition key specified when the partitioned table is created. This partitioning mode is most commonly used. The partition key is usually a date. For example, sales data is partitioned by month. 
+ +A partitioned table has the following advantages over an ordinary table: + +- High query performance: You can specify partitions when querying partitioned tables, improving query efficiency. +- High availability: If a certain partition in a partitioned table is faulty, data in the other partitions is still available. +- Easy maintenance: To fix a partitioned table having a faulty partition, you only need to fix the partition. +- Balanced I/O: Partitions can be mapped to different disks to balance I/O and improve the overall system performance. + +To convert an ordinary table to a partitioned table, you need to create a partitioned table and import data to it from the ordinary table. When you design tables, plan whether to use partitioned tables based on service requirements. + +## Procedure + +Perform the following operations on range partitioned tables. + +- Creating a tablespace + + ``` + postgres=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; + postgres=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; + postgres=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; + postgres=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; + ``` + + If the following information is displayed, the tablespace has been created: + + ``` + CREATE TABLESPACE + ``` + +- Creating a partitioned table + + ``` + postgres=# CREATE TABLE tpcds.customer_address + ( + ca_address_sk integer NOT NULL , + ca_address_id character(16) NOT NULL , + ca_street_number character(10) , + ca_street_name character varying(60) , + ca_street_type character(15) , + ca_suite_number character(10) , + ca_city character varying(60) , + ca_county character varying(30) , + ca_state character(2) , + ca_zip character(10) , + ca_country character varying(20) , + ca_gmt_offset numeric(5,2) , + ca_location_type character(20) + ) + TABLESPACE example1 + + PARTITION BY RANGE (ca_address_sk) + ( + PARTITION P1 VALUES LESS THAN(5000), + PARTITION P2 VALUES LESS THAN(10000), + PARTITION P3 VALUES LESS THAN(15000), + PARTITION P4 VALUES LESS THAN(20000), + PARTITION P5 VALUES LESS THAN(25000), + PARTITION P6 VALUES LESS THAN(30000), + PARTITION P7 VALUES LESS THAN(40000), + PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 + ) + ENABLE ROW MOVEMENT; + ``` + + If the following information is displayed, the partitioned table has been created: + + ``` + CREATE TABLE + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >You are advised to create a maximum of 1000 column-store partitioned tables. + +- Inserting data + + Insert data from the **tpcds.customer\_address** table to the **tpcds.web\_returns\_p2** table. + + Suppose a backup table **tpcds.web\_returns\_p2** of the **tpcds.customer\_address** table has been created in the database. 
You can run the following command to insert the data of the **tpcds.customer\_address** table into the backup table **tpcds.web\_returns\_p2**: + + ``` + postgres=# CREATE TABLE tpcds.web_returns_p2 + ( + ca_address_sk integer NOT NULL , + ca_address_id character(16) NOT NULL , + ca_street_number character(10) , + ca_street_name character varying(60) , + ca_street_type character(15) , + ca_suite_number character(10) , + ca_city character varying(60) , + ca_county character varying(30) , + ca_state character(2) , + ca_zip character(10) , + ca_country character varying(20) , + ca_gmt_offset numeric(5,2) , + ca_location_type character(20) + ) + TABLESPACE example1 + PARTITION BY RANGE (ca_address_sk) + ( + PARTITION P1 VALUES LESS THAN(5000), + PARTITION P2 VALUES LESS THAN(10000), + PARTITION P3 VALUES LESS THAN(15000), + PARTITION P4 VALUES LESS THAN(20000), + PARTITION P5 VALUES LESS THAN(25000), + PARTITION P6 VALUES LESS THAN(30000), + PARTITION P7 VALUES LESS THAN(40000), + PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 + ) + ENABLE ROW MOVEMENT; + CREATE TABLE + postgres=# INSERT INTO tpcds.web_returns_p2 SELECT * FROM tpcds.customer_address; + INSERT 0 0 + ``` + +- Modifying the row movement attributes of the partitioned table + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 DISABLE ROW MOVEMENT; + ALTER TABLE + ``` + +- Deleting a partition + + Run the following command to delete partition **P8**: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 DROP PARTITION P8; + ALTER TABLE + ``` + +- Adding a partition + + Run the following command to add partition **P8** and set its range to \[40000, MAXVALUE\]: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 ADD PARTITION P8 VALUES LESS THAN (MAXVALUE); + ALTER TABLE + ``` + +- Renaming a partition + - Run the following command to rename partition **P8** to **P\_9**: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 RENAME PARTITION P8 TO P_9; + ALTER TABLE + ``` + + - Run the following command to rename partition **P\_9** to **P8**: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 RENAME PARTITION FOR (40000) TO P8; + ALTER TABLE + ``` + + +- Modifying the tablespace of a partition + - Run the following command to change the tablespace of partition **P6** to **example3**: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P6 TABLESPACE example3; + ALTER TABLE + ``` + + - Run the following command to change the tablespace of partition **P4** to **example4**: + + ``` + postgres=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P4 TABLESPACE example4; + ALTER TABLE + ``` + + +- Querying a partition + + Run the following command to query partition **P6**: + + ``` + postgres=# SELECT * FROM tpcds.web_returns_p2 PARTITION (P6); + postgres=# SELECT * FROM tpcds.web_returns_p2 PARTITION FOR (35888); + ``` + +- Deleting a partitioned table and its tablespaces + + ``` + postgres=# DROP TABLE tpcds.web_returns_p2; + DROP TABLE + postgres=# DROP TABLESPACE example1; + postgres=# DROP TABLESPACE example2; + postgres=# DROP TABLESPACE example3; + postgres=# DROP TABLESPACE example4; + DROP TABLESPACE + ``` + + diff --git a/content/en/docs/Developerguide/creating-and-managing-schemas.md b/content/en/docs/Developerguide/creating-and-managing-schemas.md new file mode 100644 index 000000000..e25691381 --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-schemas.md @@ -0,0 +1,155 @@ +# Creating and Managing Schemas + +## Background + +Schemas function as models. 
Schema management allows multiple users to use the same database without mutual impacts, to organize database objects as manageable logical groups, and to add third-party applications to the same schema without causing conflicts. Schema management involves creating a schema, using a schema, deleting a schema, setting a search path for a schema, and setting schema permissions. + +## Precautions + +- openGauss contains one or more named databases. Users and user groups are shared within the openGauss, but their data is not shared. Any user who has connected to a server can access only the database specified in the connection request. +- A database can have one or more schemas, and a schema can contain tables and other data objects, such as data types, functions, and operators. One object name can be used in different schemas. For example, both **schema1** and **schema2** can have a table named **mytable**. +- Different from databases, schemas are not isolated. You can access the objects in a schema of the connected database if you have schema permissions. To manage schema permissions, you need to have knowledge about database permissions. +- A schema named with the **PG\_** prefix cannot be created because this type of schema is reserved for the database system. +- Each time a new user is created, the system creates a schema with the same name for the new user in the current database. In other databases, such a schema needs to be manually created. +- To reference a table that is not modified with a schema name, the system uses **search\_path** to find the schema that the table belongs to. **pg\_temp** and **pg\_catalog** are always the first two schemas to be searched no matter whether or how they are specified in **search\_path**. **search\_path** is a schema name list, and the first table detected in it is the target table. If no target table is found, an error will be reported. \(If a table exists but the schema it belongs to is not listed in **search\_path**, the search fails as well.\) The first schema in **search\_path** is called "current schema". This schema is the first one to be searched. If no schema name is declared, newly created database objects are saved in this schema by default. +- Each database has a **pg\_catalog** schema, which contains system catalogs and all embedded data types, functions, and operators. **pg\_catalog** is a part of the search path and has the second highest search priority. It is searched after the schema of temporary tables and before other schemas specified in **search\_path**. This search order ensures that database built-in objects can be found. To use a custom object that has the same name as a built-in object, you can specify the schema of the custom object. + +## Procedure + +- Managing users and their permissions + - Run the following command to create a schema: + + ``` + postgres=# CREATE SCHEMA myschema; + ``` + + If the following information is displayed, the schema named **myschema** is successfully created: + + ``` + CREATE SCHEMA + ``` + + To create or access an object in the schema, the object name in the command should consist of the schema name and the object name, which are separated by a dot \(.\), for example, **myschema.table**. 
+ + - Run the following command to create a schema and specify the owner: + + ``` + postgres=# CREATE SCHEMA myschema AUTHORIZATION omm; + ``` + + If the following information is displayed, the **myschema** schema that belongs to **omm** is created successfully: + + ``` + CREATE SCHEMA + ``` + + +- Use a schema. + + If you want to create or access an object in a specified schema, the object name must contain the schema name. To be specific, the name consists of a schema name and an object name, which are separated by a dot \(.\). + + - Run the following command to create the **mytable** table in **myschema**: + + ``` + postgres=# CREATE TABLE myschema.mytable(id int, name varchar(20)); + CREATE TABLE + ``` + + To specify the location of an object, the object name must contain the schema name. + + - Run the following command to query all data of the **mytable** table in **myschema**: + + ``` + postgres=# SELECT * FROM myschema.mytable; + id | name + ----+------ + (0 rows) + ``` + + +- View **search\_path** of a schema. + + You can set **search\_path** to specify the sequence of schemas in which objects are searched. The first schema listed in **search\_path** will become the default schema. If no schema is specified during object creation, the object will be created in the default schema. + + - Run the following command to view **search\_path**: + + ``` + postgres=# SHOW SEARCH_PATH; + search_path + ---------------- + "$user",public + (1 row) + ``` + + - Run the following command to set **search\_path** to **myschema** and **public** \(**myschema** will be searched first\): + + ``` + postgres=# SET SEARCH_PATH TO myschema, public; + SET + ``` + + +- Set permissions for a schema. + + By default, a user can only access database objects in their own schema. Only after a user is granted with the usage permission for a schema by the schema owner, the user can access the objects in the schema. + + By granting the **CREATE** permission for a schema to a user, the user can create objects in this schema. By default, all roles have the **USAGE** permission in the **public** schema, but common users do not have the **CREATE** permission in the **public** schema. It is insecure for a common user to connect to a specified database and create objects in its **public** schema. If the common user has the **CREATE** permission on the **public** schema, it is advised to: + + - Run the following command to revoke **PUBLIC**'s permission to create objects in the **public** schema. **public** indicates the schema and **PUBLIC** indicates all roles. + + ``` + postgres=# REVOKE CREATE ON SCHEMA public FROM PUBLIC; + REVOKE + ``` + + - Run the following command to view the current schema: + + ``` + postgres=# SELECT current_schema(); + current_schema + ---------------- + myschema + (1 row) + ``` + + - Run the following commands to create user **jack** and grant the **usage** permission for **myschema** to the user: + + ``` + postgres=# CREATE USER jack IDENTIFIED BY 'Bigdata@123'; + CREATE ROLE + postgres=# GRANT USAGE ON schema myschema TO jack; + GRANT + ``` + + - Run the following command to revoke the usage permission for **myschema** from **jack**: + + ``` + postgres=# REVOKE USAGE ON schema myschema FROM jack; + REVOKE + ``` + + +- Delete a schema. + - If a schema is empty, that is, it contains no database objects, you can execute the **DROP SCHEMA** command to delete it. 
For example, run the following command to delete an empty schema named **nullschema**: + + ``` + postgres=# DROP SCHEMA IF EXISTS nullschema; + DROP SCHEMA + ``` + + - To delete a schema that is not null, use the keyword **CASCADE** to delete it and all its objects. For example, run the following command to delete **myschema** and all its objects in it: + + ``` + postgres=# DROP SCHEMA myschema CASCADE; + DROP SCHEMA + ``` + + - Run the following command to delete user **jack**: + + ``` + postgres=# DROP USER jack; + DROP ROLE + ``` + + + diff --git a/content/en/docs/Developerguide/creating-and-managing-sequences.md b/content/en/docs/Developerguide/creating-and-managing-sequences.md new file mode 100644 index 000000000..e4f49cdac --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-sequences.md @@ -0,0 +1,78 @@ +# Creating and Managing Sequences + +## Background + +A sequence is a database object that generates unique integers. Sequence numbers are generated according to a certain rule. Sequences are unique because they increase automatically. This is why they are often used as primary keys. + +You can create a sequence for a column in either of the following methods: + +- Set the data type of a column to [sequence integer](numeric-data-types.md#en-us_topic_0237121927_en-us_topic_0059778296_t5262f987c61c4a8caff8c8037e912874). A sequence will be automatically created by the database for this column. +- Run the **[CREATE SEQUENCE](create-sequence.md)** statement to create a sequence. Set the initial value of the **nextval**\('_sequence\_name_'\) function to the default value of a column. + +## Procedure + +Method 1: Set the data type of a column to a sequence integer. For example: + +``` +postgres=# CREATE TABLE T1 +( + id serial, + name text +); +``` + +If the following information is displayed, the table has been created: + +``` +CREATE TABLE +``` + +Method 2: Create a sequence and set the initial value of the **nextval**\('_sequence\_name_'\) function to the default value of a column. + +1. Create a sequence. + + ``` + postgres=# CREATE SEQUENCE seq1 cache 100; + ``` + + If the following information is displayed, the sequence has been created: + + ``` + CREATE SEQUENCE + ``` + +2. Set the initial value of the **nextval**\('_sequence\_name_'\) function to the default value of a column. + + ``` + postgres=# CREATE TABLE T2 + ( + id int not null default nextval('seq1'), + name text + ); + ``` + + If the following information is displayed, the default value has been specified: + + ``` + CREATE TABLE + ``` + +3. Associate the sequence with a column. + + Associates a sequence with a specified column included in a table. In this way, the sequence will be deleted when you delete its associated column or the table where the column belongs to. + + ``` + postgres=# ALTER SEQUENCE seq1 OWNED BY T2.id; + ``` + + If the following information is displayed, the column has been specified: + + ``` + ALTER SEQUENCE + ``` + + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>The preceding methods are similar, except that the second method specifies cache for the sequence. A sequence having cache defined has inconsecutive values \(such as 1, 4, and 5\) and cannot maintain the order of its values. After the dependent column of a sequence has been specified, once the sequence is deleted, the sequence of the dependent will be deleted. A sequence shared by multiple columns is not forbidden in a database, but you are not advised to do that. 
+>In the current version, you can specify the auto-increment column or set the default value of a column to **nextval\('seqname'\)** when defining a table. You cannot add an auto-increment column or a column whose default value is **nextval\('seqname'\)** to an existing table. + diff --git a/content/en/docs/Developerguide/creating-and-managing-tables.md b/content/en/docs/Developerguide/creating-and-managing-tables.md new file mode 100644 index 000000000..2658b9d5e --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-tables.md @@ -0,0 +1,13 @@ +# Creating and Managing Tables + +- **[Creating Tables](creating-tables.md)** + +- **[Inserting Data to Tables](inserting-data-to-tables.md)** + +- **[Updating Data in a Table](updating-data-in-a-table.md)** + +- **[Viewing Data](viewing-data.md)** + +- **[Deleting Data from a Table](deleting-data-from-a-table.md)** + + diff --git a/content/en/docs/Developerguide/creating-and-managing-tablespaces.md b/content/en/docs/Developerguide/creating-and-managing-tablespaces.md new file mode 100644 index 000000000..243911f58 --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-tablespaces.md @@ -0,0 +1,175 @@ +# Creating and Managing Tablespaces + +## Background + +The administrator can use tablespaces to control the layout of disks where a database is installed. This has the following advantages: + +- If the initial disk partition or volume allocated to the database is full and the space cannot be logically increased, you can create and use tablespaces in other partitions until the space is reconfigured. + +- Tablespaces allow the administrator to distribute data based on the schema of database objects, improving system performance. + - A frequently used index can be placed in a disk having stable performance and high computing speed, such as a solid device. + - A table that stores archived data and is rarely used or has low performance requirements can be placed in a disk with a slow computing speed. + + +- The administrator can use tablespaces to set the maximum available disk space. In this way, when a partition is shared with other data, tablespaces will not occupy excessive space in the partition. +- You can use tablespaces to control the disk space occupied by data in a database. If the usage of a disk where a tablespace resides reaches 90%, the database switches to the read-only mode. It switches back to read/write mode when the disk usage becomes less than 90%. + + You are advised to use the background monitoring program or Database Manager to monitor the disk space usage when using the database to prevent the database from switching to the read-only mode. + +- Each tablespace corresponds to a file system directory. Assume that _Database node data directory_**/pg\_location/mount1/path1** is an empty directory for which users have read and write permissions. + + If the tablespace quota management is used, the performance may deteriorate by about 30%. **MAXSIZE** specifies the maximum quota for each database node. The deviation must be within 500 MB. Determine whether to set a tablespace to its maximum size as required. + + +## Procedure + +- Create a tablespace. + 1. Run the following command to create user **jack**: + + ``` + postgres=# CREATE USER jack IDENTIFIED BY 'Bigdata@123'; + ``` + + If the following information is displayed, the user has been created: + + ``` + CREATE ROLE + ``` + + 2. 
Run the following command to create a tablespace: + + ``` + postgres=# CREATE TABLESPACE fastspace RELATIVE LOCATION 'tablespace/tablespace_1'; + ``` + + If the following information is displayed, the tablespace has been created: + + ``` + CREATE TABLESPACE + ``` + + **fastspace** is the new tablespace, and _Database node data directory_**/pg\_location/tablespace/tablespace\_1** is an empty directory for which users have read and write permissions. + + 3. A database system administrator can run the following command to grant the permission of accessing the **fastspace** tablespace to user **jack**: + + ``` + postgres=# GRANT CREATE ON TABLESPACE fastspace TO jack; + ``` + + If the following information is displayed, the permission has been assigned: + + ``` + GRANT + ``` + + + +- Create an object in a tablespace. + + If you have the **CREATE** permission for the tablespace, you can create database objects in the tablespace, such as tables and indexes. + + Take creating a table as an example: + + - Method 1: Run the following command to create a table in a specified tablespace: + + ``` + postgres=# CREATE TABLE foo(i int) TABLESPACE fastspace; + ``` + + If the following information is displayed, the table has been created: + + ``` + CREATE TABLE + ``` + + - Method 2: Use **set default\_tablespace** to set the default tablespace and then create a table: + + ``` + postgres=# SET default_tablespace = 'fastspace'; + SET + postgres=# CREATE TABLE foo2(i int); + CREATE TABLE + ``` + + In this example, **fastspace** is the default tablespace, and **foo2** is the created table. + + +- Use one of the following methods to query a tablespace: + - Method 1: Check the **pg\_tablespace** system catalog. Run the following command to view all the tablespaces defined by the system and users: + + ``` + postgres=# SELECT spcname FROM pg_tablespace; + ``` + + - Method 2: Run the following meta-command of the **gsql** program to query the tablespaces: + + ``` + postgres=# \db + ``` + + +- Query the tablespace usage. + 1. Query the current usage of the tablespace. + + ``` + postgres=# SELECT PG_TABLESPACE_SIZE('example'); + ``` + + Information similar to the following is displayed: + + ``` + pg_tablespace_size + -------------------- + 2146304 + (1 row) + ``` + + **2146304** is the size of the tablespace, and its unit is byte. + + 2. Calculate the tablespace usage. + + Tablespace usage rate = **PG\_TABLESPACE\_SIZE**/Size of the disk where the tablespace resides + + +- Modify a tablespace. + + Run the following command to rename tablespace **fastspace** to **fspace**: + + ``` + postgres=# ALTER TABLESPACE fastspace RENAME TO fspace; + ALTER TABLESPACE + ``` + +- Delete a tablespace and related data. + - Run the following command to delete user **jack**: + + ``` + postgres=# DROP USER jack CASCADE; + DROP ROLE + ``` + + - Run the following commands to delete tables **foo** and **foo2**: + + ``` + postgres=# DROP TABLE foo; + postgres=# DROP TABLE foo2; + ``` + + If the following information is displayed, the tables have been deleted: + + ``` + DROP TABLE + ``` + + - Run the following command to delete tablespace **fspace**: + + ``` + postgres=# DROP TABLESPACE fspace; + DROP TABLESPACE + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >Only the tablespace owner or system administrator can delete a tablespace. 
+ + + diff --git a/content/en/docs/Developerguide/creating-and-managing-views.md b/content/en/docs/Developerguide/creating-and-managing-views.md new file mode 100644 index 000000000..06bdd70dc --- /dev/null +++ b/content/en/docs/Developerguide/creating-and-managing-views.md @@ -0,0 +1,55 @@ +# Creating and Managing Views + +## Background + +If some columns in one or more tables in a database are frequently searched for, an administrator can define a view for these columns, and then users can directly access these columns in the view without entering search criteria. + +A view is different from a base table. It is only a virtual object rather than a physical one. Only view definition is stored in the database and view data is not. The data is stored in a base table. If data in the base table changes, the data in the view changes accordingly. In this sense, a view is like a window through which users can know their interested data and data changes in the database. A view is triggered every time it is referenced. + +## Managing Views + +- Creating a view + + Run the following command to create **MyView**: + + ``` + postgres=# CREATE OR REPLACE VIEW MyView AS SELECT * FROM tpcds.web_returns WHERE trunc(wr_refunded_cash) > 10000; + CREATE VIEW + ``` + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >The **OR REPLACE** parameter in this command is optional. It indicates that if the view exists, the new view will replace the existing view. + +- Querying a view + + Run the following command to query **MyView**: + + ``` + postgres=# SELECT * FROM MyView; + ``` + +- Viewing details about a specified view + + Run the following command to view details about the **dba\_users** view: + + ``` + postgres=# \d+ dba_users + View "PG_CATALOG.DBA_USERS" + Column | Type | Modifiers | Storage | Description + ----------+-----------------------+-----------+----------+------------- + USERNAME | CHARACTER VARYING(64) | | extended | + View definition: + SELECT PG_AUTHID.ROLNAME::CHARACTER VARYING(64) AS USERNAME + FROM PG_AUTHID; + ``` + +- Deleting a view + + Run the following command to delete **MyView**: + + ``` + postgres=# DROP VIEW MyView; + DROP VIEW + ``` + + diff --git a/content/en/docs/Developerguide/creating-dropping-a-mot-table.md b/content/en/docs/Developerguide/creating-dropping-a-mot-table.md new file mode 100644 index 000000000..13a8f713c --- /dev/null +++ b/content/en/docs/Developerguide/creating-dropping-a-mot-table.md @@ -0,0 +1,22 @@ +# Creating/Dropping a MOT Table + +Creating a Memory Optimized Table \(MOT\) is very simple. Only the create and drop table statements in MOT differ from the statements for disk-based tables in openGauss. The syntax of **all other **commands for SELECT, DML and DDL are the same for MOT tables as for openGauss disk-based tables. + +- To create a MOT table – + + ``` + create FOREIGN table test(x int) [server mot_server]; + ``` + +- Always use the FOREIGN keyword to refer to MOT tables. +- The \[server mot\_server\] part is optional when creating a MOT table because MOT is an integrated engine, not a separate server. +- The above is an extremely simple example creating a table named **test** with a single integer column named **x**. In the next section \(**Creating an Index**\) a more realistic example is provided. 
+- To drop a MOT table named test– + + ``` + drop FOREIGN table test; + ``` + + +For a description of the limitations of supported features for MOT tables, such as data types, see the [SQL Coverage and Limitations](sql-coverage-and-limitations.md#EN-US_TOPIC_0257867392) section. + diff --git a/content/en/docs/Developerguide/creating-tables.md b/content/en/docs/Developerguide/creating-tables.md new file mode 100644 index 000000000..c232cd770 --- /dev/null +++ b/content/en/docs/Developerguide/creating-tables.md @@ -0,0 +1,28 @@ +# Creating Tables + +## Background + +A table is created in a database and can be saved in different databases. Tables under different schemas in a database can have the same name. Before creating a table, refer to [Planning a Storage Model](planning-a-storage-model.md). + +## Procedure + +Run the following statement to create a table: + +``` +postgres=# CREATE TABLE customer_t1 +( + c_customer_sk integer, + c_customer_id char(5), + c_first_name char(6), + c_last_name char(8) +); +``` + +If the following information is displayed, the table has been created: + +``` + CREATE TABLE +``` + +**c\_customer\_sk**, **c\_customer\_id**, **c\_first\_name**, and **c\_last\_name** are the column names of the table. **integer**, **char\(5\)**, **char\(6\)**, and **char\(8\)** are column name types. + diff --git a/content/en/docs/Developerguide/cursor-loop.md b/content/en/docs/Developerguide/cursor-loop.md new file mode 100644 index 000000000..34643423e --- /dev/null +++ b/content/en/docs/Developerguide/cursor-loop.md @@ -0,0 +1,18 @@ +# Cursor Loop + +Use of cursors in WHILE and LOOP statements is called a cursor loop. Generally, OPEN, FETCH, and CLOSE statements are involved in this kind of loop. The following describes a loop that simplifies a cursor loop without the need for these operations. This kind of loop is applicable to a static cursor loop, without executing four steps about a static cursor. + +## Syntax + +[Figure 1](#en-us_topic_0237122246_en-us_topic_0059778239_fd1982700d7d8496a9358b5d029a0123b) shows the syntax diagram of the **FOR AS** loop. + +**Figure 1** FOR\_AS\_loop::= +![](figures/for_as_loop.png "for_as_loop") + +## Precautions + +- The **UPDATE** operation for the queried table is not allowed in the loop statement. +- The variable loop\_name is automatically defined and is valid only in this loop. Its type is the same as that in the select\_statement query result. The value of **loop\_name** is the query result of **select\_statement**. + +- The **%FOUND**, **%NOTFOUND**, and **%ROWCOUNT** attributes access the same internal variable in openGauss. Transactions and the anonymous block do not support multiple cursor accesses at the same time. + diff --git a/content/en/docs/Developerguide/cursor-operations.md b/content/en/docs/Developerguide/cursor-operations.md new file mode 100644 index 000000000..4259e025c --- /dev/null +++ b/content/en/docs/Developerguide/cursor-operations.md @@ -0,0 +1,8 @@ +# Cursor Operations + +openGauss provides cursors as a data buffer for users to store execution results of SQL statements. Each cursor region has a name. Users can use SQL statements to obtain records one by one from cursors and grant the records to master variables, then being processed further by host languages. + +Cursor operations include cursor definition, open, fetch, and close operations. + +For the complete example of cursor operations, see [Explicit Cursor](explicit-cursor.md). 
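+
+As a quick illustration of the sequence only, a minimal sketch inside an anonymous block might look as follows (the table **sections** and its column **section_id** are hypothetical and serve only to show the order of the four operations):
+
+```
+-- Illustrative sketch only: the table "sections" and column "section_id" are assumed to exist.
+postgres=# DECLARE
+    CURSOR c1 IS SELECT section_id FROM sections ORDER BY section_id;  -- cursor definition
+    v_id INTEGER;
+BEGIN
+    OPEN c1;               -- open the cursor
+    FETCH c1 INTO v_id;    -- fetch one row into a variable
+    CLOSE c1;              -- close the cursor
+END;
+/
+```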
+ diff --git a/content/en/docs/Developerguide/cursor.md b/content/en/docs/Developerguide/cursor.md new file mode 100644 index 000000000..edc590c30 --- /dev/null +++ b/content/en/docs/Developerguide/cursor.md @@ -0,0 +1,65 @@ +# CURSOR + +## Function + +**CURSOR** defines a cursor to retrieve a small number of rows at a time out of a larger query. + +To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers pointing to context regions. With cursors, stored procedures can control alterations in context regions. + +## Precautions + +- **CURSOR** is used only in transaction blocks. +- Generally, **CURSOR** and **SELECT** both have text returns. Since data is stored in binary format in the system, the system needs to convert the data from the binary format to the text format. If data is returned in text format, client applications need to convert the data back to the binary format for processing. **FETCH** implements conversion between binary data and text data. +- Binary cursors should be used carefully. Text usually occupies larger space than binary data. A binary cursor returns internal binary data, which is easier to operate. A text cursor returns text, which is easier to retrieve and therefore reduces workload on the client. As an example, if a query returns a value of one from an integer column, you would get a string of 1 with a default cursor, whereas with a binary cursor you would get a 4-byte field containing the internal representation of the value \(in big-endian byte order\). + +## Syntax + +``` +CURSOR cursor_name + [ BINARY ] [ NO SCROLL ] [ { WITH | WITHOUT } HOLD ] + FOR query ; +``` + +## Parameter Description + +- **cursor\_name** + + Specifies the name of the cursor to be created. + + Value range: a string. It must comply with the naming convention. + +- **BINARY** + + Causes the cursor to return data in binary rather than in text format. + +- **NO SCROLL** + + Specifies how the cursor retrieves rows. + + - **NO SCROLL**: specifies that the cursor cannot be used to retrieve rows in a nonsequential fashion. + - Unspecified: Based on the query's execution plan, the system automatically determines whether the cursor can be used to retrieve rows in a nonsequential fashion. + +- **WITH HOLD | WITHOUT HOLD** + + Specifies whether the cursor can continue to be used after the transaction that created it successfully commits. + + - **WITH HOLD**: The cursor can continue to be used after the transaction that created it successfully commits. + - **WITHOUT HOLD**: The cursor cannot be used outside of the transaction that created it. + - If neither **WITH HOLD** nor **WITHOUT HOLD** is specified, the default is **WITHOUT HOLD**. + - Cross-node transactions \(for example, DDL-contained transactions created in openGauss with multiple DBnode\) do not support **WITH HOLD**. + +- **query** + + Uses a **SELECT** or **VALUES** clause to specify the rows to be returned by the cursor. + + Value range: **SELECT** or **VALUES** clause + + +## Examples + +See [Examples](fetch.md#en-us_topic_0237122165_en-us_topic_0059778422_s1ee72832a27547e4949061a010e24578) in **FETCH**. 
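+
+As a minimal, illustrative sketch only (it assumes the **tpcds.customer_address** table used elsewhere in this guide; the linked **FETCH** examples remain the authoritative reference):
+
+```
+-- Illustrative sketch: tpcds.customer_address is assumed to exist.
+postgres=# START TRANSACTION;
+postgres=# CURSOR cursor1 FOR SELECT ca_address_sk FROM tpcds.customer_address ORDER BY 1;
+postgres=# FETCH FORWARD 3 FROM cursor1;
+postgres=# CLOSE cursor1;
+postgres=# END;
+```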
+ +## Helpful Links + +[FETCH](fetch.md) + diff --git a/content/en/docs/Developerguide/cursors.md b/content/en/docs/Developerguide/cursors.md new file mode 100644 index 000000000..7e1e8238f --- /dev/null +++ b/content/en/docs/Developerguide/cursors.md @@ -0,0 +1,11 @@ +# Cursors + +- **[Overview](overview-23.md)** + +- **[Explicit Cursor](explicit-cursor.md)** + +- **[Implicit Cursor](implicit-cursor.md)** + +- **[Cursor Loop](cursor-loop.md)** + + diff --git a/content/en/docs/Developerguide/data-export-by-a-user-without-required-permissions.md b/content/en/docs/Developerguide/data-export-by-a-user-without-required-permissions.md new file mode 100644 index 000000000..b179f7309 --- /dev/null +++ b/content/en/docs/Developerguide/data-export-by-a-user-without-required-permissions.md @@ -0,0 +1,125 @@ +# Data Export By a User Without Required Permissions + +**gs\_dump** and **gs\_dumpall** use **-U** to specify the user that performs the export. If the specified user does not have the required permissions, data cannot be exported. In this case, you can set **--role** in the **gs\_dump** or **gs\_dumpall** command to the role that has the permissions. Then, **gs\_dump** or **gs\_dumpall** uses the specified role to export data. + +## Procedure + +1. Log in as the OS user **omm** to the primary node of the database. +2. Use **gs\_dump** to export data of the **human\_resource** database. + + User **jack** does not have the permissions to export data of the **human\_resource** database and the role **role1** has this permission. To export data of the **human\_resource** database, you can set **--role** to **role1** in the **gs\_dump** command. The exported files are in .tar format. + + ``` + gs_dump -U jack -W Bigdata@234 -f /home/omm/backup/MPPDB_backup.tar -p 8000 human_resource --role role1 --rolepassword abc@1234 -F t + ``` + + **Table 1** Common parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Parameter | Description | Example Value |
| --- | --- | --- |
| -U | Username for database connection. | -U jack |
| -W | User password for database connection.<br>- This parameter is not required for database administrators if the trust policy is used for authentication.<br>- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 |
| -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/MPPDB_backup.tar |
| -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 |
| dbname | Name of the database to export. | human_resource |
| --role | Role name for the export operation. After this parameter is set, the SET ROLE command will be issued after gs_dump or gs_dumpall connects to the database. It is useful when the user specified by -U does not have the permissions required by gs_dump or gs_dumpall. This parameter allows you to switch to a role with the required permissions. | -r role1 |
| --rolepassword | Role password. | --rolepassword abc@1234 |
| -F | Select the format of file to export. The values of -F are as follows:<br>- p: plaintext<br>- c: custom<br>- d: directory<br>- t: .tar | -F t |
+ + For details about other parameters, see "Server Tools \> gs\_dump" or "Server Tools \> gs\_dumpall" in the _Tool Reference_. + + +## Examples + +Example 1: User **jack** does not have the permissions required to export data of the **human\_resource** database using **gs\_dump** and the role **role1** has the permissions. To export data of the **human\_resource** database, you can set **--role** to **role1** in the **gs\_dump** command. The exported files are in .tar format. + +``` +human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; +CREATE ROLE + +gs_dump -U jack -W 1234@abc -f /home/omm/backup/MPPDB_backup11.tar -p 8000 human_resource --role role1 --rolepassword abc@1234 -F t +gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully +gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 4239 ms +``` + +Example 2: User **jack** does not have the permissions required to export the **public** schema using **gs\_dump** and the role **role1** has the permissions. To export the **public** schema, you can set **--role** to **role1** in the **gs\_dump** command. The exported files are in .tar format. + +``` +human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; +CREATE ROLE + +gs_dump -U jack -W 1234@abc -f /home/omm/backup/MPPDB_backup12.tar -p 8000 human_resource -n public --role role1 --rolepassword abc@1234 -F t +gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully +gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 3278 ms +``` + +Example 3: User **jack** does not have the permissions required to export all databases in a cluster using **gs\_dumpall** and the role **role1** \(cluster administrator\) has the permissions. To export all the databases, you can set **--role** to **role1** in the **gs\_dumpall** command. The exported files are in text format. + +``` +human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; +CREATE ROLE + +gs_dumpall -U jack -W 1234@abc -f /home/omm/backup/MPPDB_backup.sql -p 8000 --role role1 --rolepassword abc@1234 +gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: dumpall operation successful +gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: total time: 6437 ms +``` + diff --git a/content/en/docs/Developerguide/data-import-using-copy-from-stdin.md b/content/en/docs/Developerguide/data-import-using-copy-from-stdin.md new file mode 100644 index 000000000..4fb633f19 --- /dev/null +++ b/content/en/docs/Developerguide/data-import-using-copy-from-stdin.md @@ -0,0 +1,9 @@ +# Data Import Using COPY FROM STDIN + +This method is applicable to low-concurrency scenarios where a small volume of data is to import. + +Run the **COPY FROM STDIN** statement to import data to openGauss in either of the following ways: + +- Write data into the openGauss database by typing. For details, see [COPY](copy.md). +- Import data from a file or database to openGauss through the CopyManager interface driven by JDBC. You can use any parameters in the **COPY** syntax. + diff --git a/content/en/docs/Developerguide/data-type-conversion.md b/content/en/docs/Developerguide/data-type-conversion.md new file mode 100644 index 000000000..98b26e86c --- /dev/null +++ b/content/en/docs/Developerguide/data-type-conversion.md @@ -0,0 +1,156 @@ +# Data Type Conversion + +Certain data types in the database support implicit data type conversions, such as assignments and parameters called by functions. 
For other data types, you can use the type conversion functions provided by openGauss, such as the **CAST** function, to forcibly convert them. + +openGauss lists common implicit data type conversions in [Table 1](#en-us_topic_0237122212_en-us_topic_0059778807_tbc67231ae8bc4ca484efaebd2629a0e4). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>The valid value range of **DATE** supported by openGauss is from 4713 B.C. to 294276 A.D. + +**Table 1** Implicit data type conversions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Raw Data Type | Target Data Type | Remarks |
| --- | --- | --- |
| CHAR | VARCHAR2 | - |
| CHAR | NUMBER | Raw data must consist of digits. |
| CHAR | DATE | Raw data cannot exceed the valid date range. |
| CHAR | RAW | - |
| CHAR | CLOB | - |
| VARCHAR2 | CHAR | - |
| VARCHAR2 | NUMBER | Raw data must consist of digits. |
| VARCHAR2 | DATE | Raw data cannot exceed the valid date range. |
| VARCHAR2 | CLOB | - |
| NUMBER | CHAR | - |
| NUMBER | VARCHAR2 | - |
| DATE | CHAR | - |
| DATE | VARCHAR2 | - |
| RAW | CHAR | - |
| RAW | VARCHAR2 | - |
| CLOB | CHAR | - |
| CLOB | VARCHAR2 | - |
| CLOB | NUMBER | Raw data must consist of digits. |
| INT4 | CHAR | - |

+
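Beyond the implicit conversions listed above, a forced conversion with the **CAST** function can be sketched as follows (the literal values are arbitrary samples, not taken from the official examples):

```
-- Explicitly convert values whose types are not converted implicitly.
SELECT CAST('123' AS INTEGER);      -- character string to integer
SELECT CAST(now() AS DATE);         -- timestamp to date
SELECT '2010-12-10'::DATE;          -- the :: operator is an equivalent cast syntax
```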
+ diff --git a/content/en/docs/Developerguide/data-types-22.md b/content/en/docs/Developerguide/data-types-22.md new file mode 100644 index 000000000..f53a4f908 --- /dev/null +++ b/content/en/docs/Developerguide/data-types-22.md @@ -0,0 +1,4 @@ +# Data Types + +A data type refers to a value set and an operation set defined on the value set. The openGauss database consists of tables, each of which is defined by its own columns. Each column corresponds to a data type. The openGauss uses corresponding functions to perform operations on data based on data types. For example, the openGauss can perform addition, subtraction, multiplication, and division operations on data of numeric values. + diff --git a/content/en/docs/Developerguide/data-types-36.md b/content/en/docs/Developerguide/data-types-36.md new file mode 100644 index 000000000..556fbf9aa --- /dev/null +++ b/content/en/docs/Developerguide/data-types-36.md @@ -0,0 +1,4 @@ +# Data Types + +A data type refers to a value set and an operation set defined on the value set. The openGauss database consists of tables, each of which is defined by its own columns. Each column corresponds to a data type. The openGauss uses corresponding functions to perform operations on data based on data types. For example, the openGauss can perform addition, subtraction, multiplication, and division operations on data of numeric values. + diff --git a/content/en/docs/Developerguide/data-types-supported-by-column-store-tables.md b/content/en/docs/Developerguide/data-types-supported-by-column-store-tables.md new file mode 100644 index 000000000..9eb43b190 --- /dev/null +++ b/content/en/docs/Developerguide/data-types-supported-by-column-store-tables.md @@ -0,0 +1,214 @@ +# Data Types Supported by Column-store Tables + +[Table 1](#en-us_topic_0237121962_table1899319136548) lists the data types supported by column-store tables. + +**Table 1** Data types supported by column-store tables + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Category | Data Type | Length | Supported or Not |
| --- | --- | --- | --- |
| Numeric Types | smallint | 2 | Supported |
| | integer | 4 | Supported |
| | bigint | 8 | Supported |
| | decimal | -1 | Supported |
| | numeric | -1 | Supported |
| | real | 4 | Supported |
| | double precision | 8 | Supported |
| | smallserial | 2 | Supported |
| | serial | 4 | Supported |
| | bigserial | 8 | Supported |
| Monetary Types | money | 8 | Supported |
| Character Types | character varying(n), varchar(n) | -1 | Supported |
| | character(n), char(n) | n | Supported |
| | character, char | 1 | Supported |
| | text | -1 | Supported |
| | nvarchar2 | -1 | Supported |
| | name | 64 | Not supported |
| Date/Time Types | timestamp with time zone | 8 | Supported |
| | timestamp without time zone | 8 | Supported |
| | date | 4 | Supported |
| | time without time zone | 8 | Supported |
| | time with time zone | 12 | Supported |
| | interval | 16 | Supported |
| big object | clob | -1 | Supported |
| | blob | -1 | Not supported |
| other types | ... | ... | Not supported |

+
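As a rough illustration (the table and column names are placeholders, not from the official documentation), a column-store table is typically created by specifying the **ORIENTATION** storage parameter and may only use the data types marked "Supported" above:

```
-- Sketch of a column-store table that uses only supported data types.
CREATE TABLE sales_column
(
    sale_id   integer,
    amount    numeric(10,2),
    sale_date date,
    remark    varchar(100)
)
WITH (ORIENTATION = COLUMN);
```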
+ diff --git a/content/en/docs/Developerguide/data-types.md b/content/en/docs/Developerguide/data-types.md new file mode 100644 index 000000000..4fe7d1843 --- /dev/null +++ b/content/en/docs/Developerguide/data-types.md @@ -0,0 +1,35 @@ +# Data Types + +openGauss supports implicit conversions between certain data types. For details, see [PG\_CAST](pg_cast.md). + +- **[Numeric Data Types](numeric-data-types.md)** + +- **[Monetary](monetary.md)** + +- **[Boolean Data Types](boolean-data-types.md)** + +- **[Character Data Types](character-data-types.md)** + +- **[Binary Data Types](binary-data-types.md)** + +- **[Date/Time Types](date-time-types.md)** + +- **[Geometric](geometric.md)** + +- **[Network Address](network-address.md)** + +- **[Bit String Types](bit-string-types.md)** + +- **[Text Search Types](text-search-types.md)** + +- **[UUID Type](uuid-type.md)** + +- **[JSON Types](json-types.md)** + +- **[Object Identifier Types](object-identifier-types.md)** + +- **[Pseudo-Types](pseudo-types.md)** + +- **[Data Types Supported by Column-store Tables](data-types-supported-by-column-store-tables.md)** + + diff --git a/content/en/docs/Developerguide/database-connection-control-functions.md b/content/en/docs/Developerguide/database-connection-control-functions.md new file mode 100644 index 000000000..ad181fc62 --- /dev/null +++ b/content/en/docs/Developerguide/database-connection-control-functions.md @@ -0,0 +1,23 @@ +# Database Connection Control Functions + +Database connection control functions control the connections to GaussDB servers. An application can connect to multiple servers at a time. For example, a client connects to multiple databases. Each connection is represented by a PGconn object, which is obtained from the function PQconnectdb, PQconnectdbParams, or PQsetdbLogin. Note that these functions will always return a non-null object pointer, unless there is too little memory to allocate the PGconn object. The interface for establishing a connection is stored in the PGconn object. The PQstatus function can be called to check the return value for a successful connection. + +- **[PQconnectdbParams](pqconnectdbparams.md)** + +- **[PQconnectdb](pqconnectdb.md)** + +- **[PQconninfoParse](pqconninfoparse.md)** + +- **[PQconnectStart](pqconnectstart.md)** + +- **[PQerrorMessage](pqerrormessage.md)** + +- **[PQsetdbLogin](pqsetdblogin.md)** + +- **[PQfinish](pqfinish.md)** + +- **[PQreset](pqreset.md)** + +- **[PQstatus](pqstatus.md)** + + diff --git a/content/en/docs/Developerguide/database-logical-architecture.md b/content/en/docs/Developerguide/database-logical-architecture.md new file mode 100644 index 000000000..ee150f2b0 --- /dev/null +++ b/content/en/docs/Developerguide/database-logical-architecture.md @@ -0,0 +1,14 @@ +# Database Logical Architecture + +Data nodes \(DNs\) in openGauss store data on disks. This section describes the objects on each DN from the logical view and the relationship between these objects. [Figure 1](#en-us_topic_0237120245_en-us_topic_0059779316_fb2fa3b3cc8824dea95318504e0537913) shows the database logical structure. + +**Figure 1** Database logical architecture +![](figures/database-logical-architecture.png "database-logical-architecture") + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- Tablespace: Directory storing physical files of its databases. Multiple tablespaces can coexist, and each of them can contain files belonging to different databases. +>- Database: A database manages various data objects and is isolated from each other. 
Objects managed by a database can be distributed to multiple tablespaces. +>- Datafile Segment: Data file, each of which stores data of only one table. A table containing more than 1 GB of data is stored in multiple datafile segments. +>- One table belongs to only one database and one tablespace. The datafile segments storing the data of the same table must be in the same tablespace. +>- Block: Basic unit of database management. Its default size is 8 KB. + diff --git a/content/en/docs/Developerguide/database-object-functions.md b/content/en/docs/Developerguide/database-object-functions.md new file mode 100644 index 000000000..657cc0cf8 --- /dev/null +++ b/content/en/docs/Developerguide/database-object-functions.md @@ -0,0 +1,454 @@ +# Database Object Functions + +## Database Object Size Functions + +Database object size functions calculate the actual disk space used by database objects. + +- pg\_column\_size\(any\) + + Description: Specifies the number of bytes used to store a particular value \(possibly compressed\) + + Return type: int + + Note: **pg\_column\_size** displays the space for storing an independent data value. + + ``` + postgres=# SELECT pg_column_size(1); + pg_column_size + ---------------- + 4 + (1 row) + ``` + +- pg\_database\_size\(oid\) + + Description: Specifies the disk space used by the database with the specified OID. + + Return type: bigint + +- pg\_database\_size\(name\) + + Description: Specifies the disk space used by the database with the specified name. + + Return type: bigint + + Note: **pg\_database\_size** receives the OID or name of a database and returns the disk space used by the corresponding object. + + For example: + + ``` + postgres=# SELECT pg_database_size('postgres'); + pg_database_size + ------------------ + 51590112 + (1 row) + ``` + +- pg\_relation\_size\(oid\) + + Description: Specifies the disk space used by the table with a specified OID or index. + + Return type: bigint + +- get\_db\_source\_datasize\(\) + + Description: Estimates the total size of non-compressed data in the current database. + + Return type: bigint + + Note: \(1\) **ANALYZE** must be performed before this function is called. \(2\) Calculate the total size of non-compressed data by estimating the compression rate of column-store tables. + + For example: + + ``` + postgres=# analyze; + ANALYZE + postgres=# select get_db_source_datasize(); + get_db_source_datasize + ------------------------ + 35384925667 + (1 row) + ``` + +- pg\_relation\_size\(text\) + + Description: Specifies the disk space used by the table with a specified name or index. The table name can be schema-qualified. + + Return type: bigint + +- pg\_relation\_size\(relation regclass, fork text\) + + Description: Specifies the disk space used by the specified bifurcating tree \('main', 'fsm', or 'vm'\) of a certain table or index. + + Return type: bigint + +- pg\_relation\_size\(relation regclass\) + + Description: Is an abbreviation of **pg\_relation\_size\(..., 'main'\)**. + + Return type: bigint + + Note: **pg\_relation\_size** receives the OID or name of a table, index, or compressed table, and returns the size. + +- pg\_partition\_size\(oid,oid\) + + Description: Specifies the disk space used by the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. + + Return type: bigint + +- pg\_partition\_size\(text, text\) + + Description: Specifies the disk space used by the partition with a specified name. 
The first **text** is the table name and the second **text** is the partition name. + + Return type: bigint + +- pg\_partition\_indexes\_size\(oid,oid\) + + Description: Specifies the disk space used by the index of the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. + + Return type: bigint + +- pg\_partition\_indexes\_size\(text,text\) + + Description: Specifies the disk space used by the index of the partition with a specified name. The first **text** is the table name and the second **text** is the partition name. + + Return type: bigint + +- pg\_indexes\_size\(regclass\) + + Description: Specifies the total disk space used by the index appended to the specified table. + + Return type: bigint + +- pg\_size\_pretty\(bigint\) + + Description: Converts a size in bytes expressed as a 64-bit integer into a human-readable format with size units. + + Return type: text + +- pg\_size\_pretty\(numeric\) + + Description: Converts a size in bytes expressed as a numeric value into a human-readable format with size units. + + Return type: text + + Note: **pg\_size\_pretty** formats the results of other functions into a human-readable format. KB/MB/GB/TB can be used. + +- pg\_table\_size\(regclass\) + + Description: Specifies the disk space used by the specified table, excluding indexes \(but including TOAST, free space mapping, and visibility mapping\). + + Return type: bigint + +- pg\_tablespace\_size\(oid\) + + Description: Specifies the disk space used by the tablespace with a specified OID. + + Return type: bigint + +- pg\_tablespace\_size\(name\) + + Description: Specifies the disk space used by the tablespace with a specified name. + + Return type: bigint + + Note: + + **pg\_tablespace\_size** receives the OID or name of a database and returns the disk space used by the corresponding object. + +- pg\_total\_relation\_size\(oid\) + + Description: Specifies the disk space used by the table with a specified OID, including the index and the compressed data. + + Return type: bigint + +- pg\_total\_relation\_size\(regclass\) + + Description: Specifies the total disk space used by the specified table, including all indexes and TOAST data. + + Return type: bigint + +- pg\_total\_relation\_size\(text\) + + Description: Specifies the disk space used by the table with a specified name, including the index and the compressed data. The table name can be schema-qualified. + + Return type: bigint + + Note: **pg\_total\_relation\_size** receives the OID or name of a table or a compressed table, and returns the sizes of the data, related indexes, and the compressed table in bytes. + +- datalength\(any\) + + Description: Specifies the number of bytes used by an expression of a specified data type \(data management space, data compression, or data type conversion is not considered\). + + Return type: int + + Note: **datalength** is used to calculate the space of an independent data value. + + For example: + + ``` + postgres=# SELECT datalength(1); + datalength + ------------ + 4 + (1 row) + ``` + + The following table lists the supported data types and calculation methods. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Category | Data Type | Storage Space |
| --- | --- | --- |
| Numeric data types: Integer types | TINYINT | 1 |
| | SMALLINT | 2 |
| | INTEGER | 4 |
| | BINARY_INTEGER | 4 |
| | BIGINT | 8 |
| Numeric data types: Any-precision types | DECIMAL | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| | NUMERIC | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| | NUMBER | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| Numeric data types: Sequence integer | SMALLSERIAL | 2 |
| | SERIAL | 4 |
| | BIGSERIAL | 8 |
| Numeric data types: Floating point types | FLOAT4 | 4 |
| | DOUBLE PRECISION | 8 |
| | FLOAT8 | 8 |
| | BINARY_DOUBLE | 8 |
| | FLOAT[(p)] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| | DEC[(p[,s])] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| | INTEGER[(p[,s])] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
| Boolean data types: Boolean type | BOOLEAN | 1 |
| Character data types: Character types | CHAR | n |
| | CHAR(n) | n |
| | CHARACTER(n) | n |
| | NCHAR(n) | n |
| | VARCHAR(n) | n |
| | CHARACTER | Actual number of bytes of a character |
| | VARYING(n) | Actual number of bytes of a character |
| | VARCHAR2(n) | Actual number of bytes of a character |
| | NVARCHAR2(n) | Actual number of bytes of a character |
| | TEXT | Actual number of bytes of a character |
| | CLOB | Actual number of bytes of a character |
| Time data types: Time types | DATE | 8 |
| | TIME | 8 |
| | TIMEZ | 12 |
| | TIMESTAMP | 8 |
| | TIMESTAMPZ | 8 |
| | SMALLDATETIME | 8 |
| | INTERVAL DAY TO SECOND | 16 |
| | INTERVAL | 16 |
| | RELTIME | 4 |
| | ABSTIME | 4 |
| | TINTERVAL | 12 |

+
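A small combined sketch (the table name **my_table** is a placeholder) shows how the size functions above are commonly paired with **pg_size_pretty** to produce human-readable output:

```
-- Total size of a table, including indexes and TOAST data, in readable units.
SELECT pg_size_pretty(pg_total_relation_size('my_table'));

-- Size of the named database in readable units.
SELECT pg_size_pretty(pg_database_size('postgres'));
```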
+ + +## Database Object Position Functions + +- pg\_relation\_filenode\(relation regclass\) + + Description: Specifies the ID of a filenode with the specified relationship. + + Return type: oid + + Description: **pg\_relation\_filenode** receives the OID or name of a table, index, sequence, or compressed table, and returns the **filenode** number allocated to it. The **filenode** is the basic component of the file name used by the relationship. For most tables, the result is the same as that of **pg\_class.relfilenode**. For the specified system directory, **relfilenode** is **0** and this function must be used to obtain the correct value. If a relationship that is not stored is transmitted, such as a view, this function returns **NULL**. + +- pg\_relation\_filepath\(relation regclass\) + + Description: Specifies the name of a file path with the specified relationship. + + Return type: text + + Description: **pg\_relation\_filepath** is similar to **pg\_relation\_filenode**, except that **pg\_relation\_filepath** returns the whole file path name for the relationship \(relative to the data directory **PGDATA** of openGauss\). + + diff --git a/content/en/docs/Developerguide/database-security-management.md b/content/en/docs/Developerguide/database-security-management.md new file mode 100644 index 000000000..520991efc --- /dev/null +++ b/content/en/docs/Developerguide/database-security-management.md @@ -0,0 +1,9 @@ +# Database Security Management + +- **[Client Access Authentication](client-access-authentication.md)** + +- **[Managing Users and Their Permissions](managing-users-and-their-permissions.md)** + +- **[Configuring Database Audit](configuring-database-audit.md)** + + diff --git a/content/en/docs/Developerguide/database-statement-execution-functions.md b/content/en/docs/Developerguide/database-statement-execution-functions.md new file mode 100644 index 000000000..98c4beba8 --- /dev/null +++ b/content/en/docs/Developerguide/database-statement-execution-functions.md @@ -0,0 +1,29 @@ +# **Database Statement Execution Functions** + +After the connection to the database server is successfully established, you can use the functions described in this section to execute SQL queries and commands. + +- **[PQclear](pqclear.md)** + +- **[PQexec](pqexec.md)** + +- **[PQexecParams](pqexecparams.md)** + +- **[PQexecParamsBatch](pqexecparamsbatch.md)** + +- **[PQexecPrepared](pqexecprepared.md)** + +- **[PQexecPreparedBatch](pqexecpreparedbatch.md)** + +- **[PQfname](pqfname.md)** + +- **[PQgetvalue](pqgetvalue.md)** + +- **[PQnfields](pqnfields.md)** + +- **[PQntuples](pqntuples.md)** + +- **[PQprepare](pqprepare.md)** + +- **[PQresultStatus](pqresultstatus.md)** + + diff --git a/content/en/docs/Developerguide/date-and-time-processing-functions-and-operators.md b/content/en/docs/Developerguide/date-and-time-processing-functions-and-operators.md new file mode 100644 index 000000000..5e645abd2 --- /dev/null +++ b/content/en/docs/Developerguide/date-and-time-processing-functions-and-operators.md @@ -0,0 +1,1336 @@ +# Date and Time Processing Functions and Operators + +## Date and Time Operators + +>![](public_sys-resources/icon-warning.gif) **WARNING:** +>When the user uses date/time operators, explicit type prefixes are modified for corresponding operands to ensure that the operands parsed by the database are consistent with what the user expects, and no unexpected results occur. +>For example, abnormal mistakes will occur in the following example without an explicit data type. 
+>``` +>SELECT date '2001-10-01' - '7' AS RESULT; +>``` + +**Table 1** Time and date operators + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

- Operator: `+`

  Example:

  ```
  postgres=# SELECT date '2001-09-28' + integer '7' AS RESULT;
         result
  ---------------------
   2001-10-05 00:00:00
  (1 row)

  postgres=# SELECT date '2001-09-28' + interval '1 hour' AS RESULT;
         result
  ---------------------
   2001-09-28 01:00:00
  (1 row)

  postgres=# SELECT date '2001-09-28' + time '03:00' AS RESULT;
         result
  ---------------------
   2001-09-28 03:00:00
  (1 row)

  postgres=# SELECT interval '1 day' + interval '1 hour' AS RESULT;
       result
  ----------------
   1 day 01:00:00
  (1 row)

  postgres=# SELECT timestamp '2001-09-28 01:00' + interval '23 hours' AS RESULT;
         result
  ---------------------
   2001-09-29 00:00:00
  (1 row)

  postgres=# SELECT time '01:00' + interval '3 hours' AS RESULT;
    result
  ----------
   04:00:00
  (1 row)
  ```

- Operator: `-`

  Example:

  ```
  postgres=# SELECT date '2001-10-01' - date '2001-09-28' AS RESULT;
   result
  --------
   3 days
  (1 row)

  postgres=# SELECT date '2001-10-01' - integer '7' AS RESULT;
         result
  ---------------------
   2001-09-24 00:00:00
  (1 row)

  postgres=# SELECT date '2001-09-28' - interval '1 hour' AS RESULT;
         result
  ---------------------
   2001-09-27 23:00:00
  (1 row)

  postgres=# SELECT time '05:00' - time '03:00' AS RESULT;
    result
  ----------
   02:00:00
  (1 row)

  postgres=# SELECT time '05:00' - interval '2 hours' AS RESULT;
    result
  ----------
   03:00:00
  (1 row)

  postgres=# SELECT timestamp '2001-09-28 23:00' - interval '23 hours' AS RESULT;
         result
  ---------------------
   2001-09-28 00:00:00
  (1 row)

  postgres=# SELECT interval '1 day' - interval '1 hour' AS RESULT;
    result
  ----------
   23:00:00
  (1 row)

  postgres=# SELECT timestamp '2001-09-29 03:00' - timestamp '2001-09-27 12:00' AS RESULT;
       result
  ----------------
   1 day 15:00:00
  (1 row)
  ```

- Operator: `*`

  Example:

  ```
  postgres=# SELECT 900 * interval '1 second' AS RESULT;
    result
  ----------
   00:15:00
  (1 row)

  postgres=# SELECT 21 * interval '1 day' AS RESULT;
   result
  ---------
   21 days
  (1 row)

  postgres=# SELECT double precision '3.5' * interval '1 hour' AS RESULT;
    result
  ----------
   03:30:00
  (1 row)
  ```

- Operator: `/`

  Example:

  ```
  postgres=# SELECT interval '1 hour' / double precision '1.5' AS RESULT;
    result
  ----------
   00:40:00
  (1 row)
  ```
+
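As the warning at the beginning of this section suggests, adding an explicit type prefix to the second operand removes the ambiguity; the typed form below is the same subtraction already shown in the table above:

```
-- Explicitly typed operands: date minus integer.
postgres=# SELECT date '2001-10-01' - integer '7' AS RESULT;
       result
---------------------
 2001-09-24 00:00:00
(1 row)
```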
+ +## Time/Date Functions + +- age\(timestamp, timestamp\) + + Description: Subtracts arguments, producing a result in YYYY-MM-DD format. If the result is negative, the returned result is also negative. + + Return type: interval + + Example: + + ``` + postgres=# SELECT age(timestamp '2001-04-10', timestamp '1957-06-13'); + age + ------------------------- + 43 years 9 mons 27 days + (1 row) + ``` + +- age\(timestamp\) + + Description: Subtracts from **current\_date** + + Return type: interval + + Example: + + ``` + postgres=# SELECT age(timestamp '1957-06-13'); + age + ------------------------- + 60 years 2 mons 18 days + (1 row) + ``` + +- clock\_timestamp\(\) + + Description: Specifies the current timestamp of the real-time clock. + + Return type: timestamp with time zone + + Example: + + ``` + postgres=# SELECT clock_timestamp(); + clock_timestamp + ------------------------------- + 2017-09-01 16:57:36.636205+08 + (1 row) + ``` + +- current\_date + + Description: current date + + Return type: date + + Example: + + ``` + postgres=# SELECT current_date; + date + ------------ + 2017-09-01 + (1 row) + ``` + +- current\_time + + Description: current time + + Return type: time with time zone + + Example: + + ``` + postgres=# SELECT current_time; + timetz + -------------------- + 16:58:07.086215+08 + (1 row) + ``` + +- current\_timestamp + + Description: Specifies the current date and time. + + Return type: timestamp with time zone + + Example: + + ``` + postgres=# SELECT current_timestamp; + pg_systimestamp + ------------------------------ + 2017-09-01 16:58:19.22173+08 + (1 row) + ``` + +- date\_part\(text, timestamp\) + + Description: + + Obtains the hour. + + Equivalent to **extract\(field from timestamp\)**. + + Return type: double precision + + Example: + + ``` + postgres=# SELECT date_part('hour', timestamp '2001-02-16 20:38:40'); + date_part + ----------- + 20 + (1 row) + ``` + +- date\_part\(text, interval\) + + Description: Obtains the month. If the value is greater than 12, obtain the remainder after it is divided by 12. Equivalent to **extract\(field from timestamp\)**. + + Return type: double precision + + Example: + + ``` + postgres=# SELECT date_part('month', interval '2 years 3 months'); + date_part + ----------- + 3 + (1 row) + ``` + +- date\_trunc\(text, timestamp\) + + Description: Truncates to the precision specified by **text**. + + Return type: timestamp + + Example: + + ``` + postgres=# SELECT date_trunc('hour', timestamp '2001-02-16 20:38:40'); + date_trunc + --------------------- + 2001-02-16 20:00:00 + (1 row) + ``` + +- trunc\(timestamp\) + + Description: By default, the data is intercepted by day. + + Example: + + ``` + postgres=# SELECT trunc(timestamp '2001-02-16 20:38:40'); trunc + --------------------- + 2001-02-16 00:00:00 + (1 row) + ``` + +- extract\(field from timestamp\) + + Description: Obtains the hour. + + Return type: double precision + + Example: + + ``` + postgres=# SELECT extract(hour from timestamp '2001-02-16 20:38:40'); + date_part + ----------- + 20 + (1 row) + ``` + +- extract\(field from interval\) + + Description: Obtains the month. If the value is greater than 12, obtain the remainder after it is divided by 12. + + Return type: double precision + + Example: + + ``` + postgres=# SELECT extract(month from interval '2 years 3 months'); + date_part + ----------- + 3 + (1 row) + ``` + +- isfinite\(date\) + + Description: Tests for valid date. 
+ + Return type: Boolean + + Example: + + ``` + postgres=# SELECT isfinite(date '2001-02-16'); + isfinite + ---------- + t + (1 row) + ``` + +- isfinite\(timestamp\) + + Description: Tests for valid timestamp. + + Return type: Boolean + + Example: + + ``` + postgres=# SELECT isfinite(timestamp '2001-02-16 21:28:30'); + isfinite + ---------- + t + (1 row) + ``` + +- isfinite\(interval\) + + Description: Tests for valid interval. + + Return type: Boolean + + Example: + + ``` + postgres=# SELECT isfinite(interval '4 hours'); + isfinite + ---------- + t + (1 row) + ``` + +- justify\_days\(interval\) + + Description: Sets the time interval in months \(30 days as a month\). + + Return type: interval + + Example: + + ``` + postgres=# SELECT justify_days(interval '35 days'); + justify_days + -------------- + 1 mon 5 days + (1 row) + ``` + +- justify\_hours\(interval\) + + Description: Sets the time interval in days \(24 hours is one day\). + + Return type: interval + + Example: + + ``` + postgres=# SELECT JUSTIFY_HOURS(INTERVAL '27 HOURS'); + justify_hours + ---------------- + 1 day 03:00:00 + (1 row) + ``` + +- justify\_interval\(interval\) + + Description: Adjusts **interval** using **justify\_days** and **justify\_hours**. + + Return type: interval + + Example: + + ``` + postgres=# SELECT JUSTIFY_INTERVAL(INTERVAL '1 MON -1 HOUR'); + justify_interval + ------------------ + 29 days 23:00:00 + (1 row) + ``` + +- localtime + + Description: current time + + Return type: time + + Example: + + ``` + postgres=# SELECT localtime AS RESULT; + result + ---------------- + 16:05:55.664681 + (1 row) + ``` + +- localtimestamp + + Description: Specifies the current date and time. + + Return type: timestamp + + Example: + + ``` + postgres=# SELECT localtimestamp; + timestamp + ---------------------------- + 2017-09-01 17:03:30.781902 + (1 row) + ``` + +- now\(\) + + Description: Specifies the current date and time. + + Return type: timestamp with time zone + + Example: + + ``` + postgres=# SELECT now(); + now + ------------------------------- + 2017-09-01 17:03:42.549426+08 + (1 row) + ``` + +- numtodsinterval\(num, interval\_unit\) + + Description: Converts a number to the interval type. **num** is a numeric-typed number. **interval\_unit** is a string in the following format: 'DAY' | 'HOUR' | 'MINUTE' | 'SECOND' + + You can set the [IntervalStyle](zone-and-formatting.md#en-us_topic_0237124733_en-us_topic_0059778109_s89302a8dcd7f46ecb7167574d6397dc0) parameter to **a** to be compatible with the interval output format of the function. + + Example: + + ``` + postgres=# SELECT numtodsinterval(100, 'HOUR'); + numtodsinterval + ----------------- + 100:00:00 + (1 row) + + postgres=# SET intervalstyle = a; + SET + postgres=# SELECT numtodsinterval(100, 'HOUR'); + numtodsinterval + ------------------------------- + +000000004 04:00:00.000000000 + (1 row) + ``` + +- pg\_sleep\(seconds\) + + Description: Server thread delay time, in seconds. + + Return type: void + + Example: + + ``` + postgres=# SELECT pg_sleep(10); + pg_sleep + ---------- + + (1 row) + ``` + +- statement\_timestamp\(\) + + Description: Specifies the current date and time. + + Return type: timestamp with time zone + + Example: + + ``` + postgres=# SELECT statement_timestamp(); + statement_timestamp + ------------------------------- + 2017-09-01 17:04:39.119267+08 + (1 row) + ``` + +- sysdate + + Description: Specifies the current date and time. 
+ + Return type: timestamp + + Example: + + ``` + postgres=# SELECT sysdate; + sysdate + --------------------- + 2017-09-01 17:04:49 + (1 row) + ``` + +- timeofday\(\) + + Description: current date and time \(like clock\_timestamp, but returned as text\) + + Return type: text + + Example: + + ``` + postgres=# SELECT timeofday(); + timeofday + ------------------------------------- + Fri Sep 01 17:05:01.167506 2017 CST + (1 row) + ``` + +- transaction\_timestamp\(\) + + Description: current date and time \(equivalent to **current\_timestamp**\) + + Return type: timestamp with time zone + + Example: + + ``` + postgres=# SELECT transaction_timestamp(); + transaction_timestamp + ------------------------------- + 2017-09-01 17:05:13.534454+08 + (1 row) + ``` + +- add\_months\(d,n\) + + Description: Returns the date _date_ plus _integer_ months. + + Return type: timestamp + + Example: + + ``` + postgres=# SELECT add_months(to_date('2017-5-29', 'yyyy-mm-dd'), 11) FROM dual; + add_months + --------------------- + 2018-04-29 00:00:00 + (1 row) + ``` + +- last\_day\(d\) + + Description: Returns the date of the last day of the month that contains _date_. + + Return type: timestamp + + Example: + + ``` + postgres=# select last_day(to_date('2017-01-01', 'YYYY-MM-DD')) AS cal_result; + cal_result + --------------------- + 2017-01-31 00:00:00 + (1 row) + ``` + + +- next\_day\(x,y\) + + Description: Returns the time of the next week y started from x + + Return type: timestamp + + Example: + + ``` + postgres=# select next_day(timestamp '2017-05-25 00:00:00','Sunday')AS cal_result; + cal_result + --------------------- + 2017-05-28 00:00:00 + (1 row) + ``` + + +## TIMESTAMPDIFF + +**TIMESTAMPDIFF\(**_unit , timestamp\_expr1, timestamp\_expr2_**\)** + +The timestampdiff function returns the result of **timestamp\_expr2** - **timestamp\_expr1** in the specified unit. **timestamp\_expr1** and **timestamp\_expr2** must be value expressions of the **timestamp**, **timestamptz**, or **date** type. **unit** indicates the unit of the difference between two dates. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>This function is valid only when openGauss is compatible with the MY type \(that is, dbcompatibility = 'B'\). + +- year + + Year. + + ``` + postgres=# SELECT TIMESTAMPDIFF(YEAR, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 2 + (1 row) + ``` + + +- quarter + + Quarter. + + ``` + postgres=# SELECT TIMESTAMPDIFF(QUARTER, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 8 + (1 row) + ``` + +- month + + Month. + + ``` + postgres=# SELECT TIMESTAMPDIFF(MONTH, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 24 + (1 row) + ``` + +- week + + Week. + + ``` + postgres=# SELECT TIMESTAMPDIFF(WEEK, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 104 + (1 row) + ``` + +- day + + Day. + + ``` + postgres=# SELECT TIMESTAMPDIFF(DAY, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 730 + (1 row) + ``` + + +- hour + + Hour. + + ``` + postgres=# SELECT TIMESTAMPDIFF(HOUR, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 1 + (1 row) + + ``` + + +- minute + + Minute. + + ``` + postgres=# SELECT TIMESTAMPDIFF(MINUTE, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 61 + (1 row) + + ``` + + +- second + + Second. 
+ + ``` + postgres=# SELECT TIMESTAMPDIFF(SECOND, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 3661 + (1 row) + + + ``` + + +- microseconds + + The seconds column, including fractional parts, multiplied by 1,000,000. + + ``` + postgres=# SELECT TIMESTAMPDIFF(MICROSECOND, '2020-01-01 10:10:10.000000', '2020-01-01 10:10:10.111111'); + timestamp_diff + ---------------- + 111111 + (1 row) + + ``` + + +## EXTRACT + +**EXTRACT\(**_field _**FROM **_source_**\)** + +The **extract** function retrieves subcolumns such as year or hour from date/time values. **source** must be a value expression of type **timestamp**, **time**, or **interval**. \(Expressions of type **date** are cast to **timestamp** and can therefore be used as well.\) **field** is an identifier or string that selects what column to extract from the source value. The **extract** function returns values of type **double precision**. The following are valid field names: + +- century + + + + The first century starts at 0001-01-01 00:00:00 AD. This definition applies to all Gregorian calendar countries. There is no century number 0. You go from **-1** century to **1** century. + + Example: + + ``` + postgres=# SELECT EXTRACT(CENTURY FROM TIMESTAMP '2000-12-16 12:21:13'); + date_part + ----------- + 20 + (1 row) + ``` + +- day + - For **timestamp** values, the day \(of the month\) column \(1–31\) + + ``` + postgres=# SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 16 + (1 row) + ``` + + - For **interval** values, the number of days + + ``` + postgres=# SELECT EXTRACT(DAY FROM INTERVAL '40 days 1 minute'); + date_part + ----------- + 40 + (1 row) + ``` + + +- decade + + Year column divided by 10 + + ``` + postgres=# SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 200 + (1 row) + ``` + +- dow + + Day of the week as Sunday\(**0**\) to Saturday \(**6**\) + + ``` + postgres=# SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 5 + (1 row) + ``` + +- doy + + Day of the year \(1–365 or 366\) + + ``` + postgres=# SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 47 + (1 row) + ``` + +- epoch + - For **timestamp with time zone** values, the number of seconds since 1970-01-01 00:00:00 UTC \(can be negative\); + + for **date** and **timestamp** values, the number of seconds since 1970-01-01 00:00:00 local time; + + for **interval** values, the total number of seconds in the interval. + + ``` + postgres=# SELECT EXTRACT(EPOCH FROM TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40.12-08'); + date_part + -------------- + 982384720.12 + (1 row) + ``` + + ``` + postgres=# SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); + date_part + ----------- + 442800 + (1 row) + ``` + + - Way to convert an epoch value back to a timestamp + + ``` + postgres=# SELECT TIMESTAMP WITH TIME ZONE 'epoch' + 982384720.12 * INTERVAL '1 second' AS RESULT; + result + --------------------------- + 2001-02-17 12:38:40.12+08 + (1 row) + ``` + + +- hour + + Hour column \(0–23\) + + ``` + postgres=# SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 20 + (1 row) + ``` + +- isodow + + Day of the week \(1–7\) + + Monday is 1 and Sunday is 7. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >This is identical to **dow** except for Sunday. 
+ + ``` + postgres=# SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); + date_part + ----------- + 7 + (1 row) + ``` + +- isoyear + + The ISO 8601 year that the date falls in \(not applicable to intervals\). + + Each ISO year begins with the Monday of the week containing January 4, so in early January or late December the ISO year may be different from the Gregorian year. See the **week** column for more information. + + ``` + postgres=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-01'); + date_part + ----------- + 2005 + (1 row) + ``` + + ``` + postgres=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); + date_part + ----------- + 2006 + (1 row) + ``` + +- microseconds + + The seconds column, including fractional parts, multiplied by 1,000,000 + + ``` + postgres=# SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); + date_part + ----------- + 28500000 + (1 row) + ``` + +- millennium + + + + Years in the 1900s are in the second millennium. The third millennium started from January 1, 2001. + + ``` + postgres=# SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 3 + (1 row) + ``` + +- milliseconds + + The seconds column, including fractional parts, multiplied by 1000. Note that this includes full seconds. + + ``` + postgres=# SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); + date_part + ----------- + 28500 + (1 row) + ``` + +- minute + + Minutes column \(0–59\) + + ``` + postgres=# SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 38 + (1 row) + ``` + +- month + + For **timestamp** values, the number of the month within the year \(1–12\); + + ``` + postgres=# SELECT EXTRACT(MONTH FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 2 + (1 row) + ``` + + For **interval** values, the number of months, modulo 12 \(0–11\) + + ``` + postgres=# SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); + date_part + ----------- + 1 + (1 row) + ``` + +- quarter + + Quarter of the year \(1–4\) that the date is in + + ``` + postgres=# SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 1 + (1 row) + ``` + +- second + + Seconds column, including fractional parts \(0–59\) + + ``` + postgres=# SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); + date_part + ----------- + 28.5 + (1 row) + ``` + +- timezone + + The time zone offset from UTC, measured in seconds. Positive values correspond to time zones east of UTC, negative values to zones west of UTC. + +- timezone\_hour + + The hour component of the time zone offset + +- timezone\_minute + + The minute component of the time zone offset + +- week + + The number of the week of the year that the day is in. By definition \(ISO 8601\), the first week of a year contains January 4 of that year. \(The ISO-8601 week starts on Monday.\) In other words, the first Thursday of a year is in week 1 of that year. + + Because of this, it is possible for early January dates to be part of the 52nd or 53rd week of the previous year, and late December dates to be part of the 1st week of the next year. For example, **2005-01-01** is part of the 53rd week of year 2004, **2006-01-01** is part of the 52nd week of year 2005, and **2012-12-31** is part of the 1st week of year 2013. You are advised to use the columns **isoyear** and **week** together to ensure consistency. 
+ + ``` + postgres=# SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 7 + (1 row) + ``` + +- year + + Year column + + ``` + postgres=# SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 2001 + (1 row) + ``` + + +## date\_part + +The **date\_part** function is modeled on the traditional Ingres equivalent to the SQL-standard function **extract**: + +**date\_part\('**_field_**', **_source_**\)** + +Note that here the **field** parameter needs to be a string value, not a name. The valid field names for **field** are the same as for **extract**. For details, see [EXTRACT](#en-us_topic_0237121972_en-us_topic_0059779084_scb40477163d740de80f0e984cad28e7b). + +Example: + +``` +postgres=# SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40'); + date_part +----------- + 16 +(1 row) +``` + +``` +postgres=# SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); + date_part +----------- + 4 +(1 row) +``` + +[Table 2](#en-us_topic_0237121972_en-us_topic_0059779084_t2e5425ae98464c3dab59352ff3bfa786) specifies the schema for formatting date and time values. + +**Table 2** Schema for formatting date and time + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Category | Pattern | Description |
| --- | --- | --- |
| Hour | HH | Number of hours in one day (01-12) |
| | HH12 | Number of hours in one day (01-12) |
| | HH24 | Number of hours in one day (00-23) |
| Minute | MI | Minute (00-59) |
| Second | SS | Second (00-59) |
| | FF | Microsecond (000000-999999) |
| | SSSSS | Second after midnight (0-86399) |
| Morning and afternoon | AM or A.M. | Morning identifier |
| | PM or P.M. | Afternoon identifier |
| Year | Y,YYY | Year with comma (with four digits or more) |
| | SYYYY | Year with four digits BC |
| | YYYY | Year (with four digits or more) |
| | YYY | Last three digits of a year |
| | YY | Last two digits of a year |
| | Y | Last one digit of a year |
| | IYYY | ISO year (with four digits or more) |
| | IYY | Last three digits of an ISO year |
| | IY | Last two digits of an ISO year |
| | I | Last one digit of an ISO year |
| | RR | Last two digits of a year (A year of the 20th century can be stored in the 21st century.) |
| | RRRR | Capable of receiving a year with four digits or two digits. If there are 2 digits, the value is the same as the returned value of RR. If there are 4 digits, the value is the same as YYYY. |
| | BC or B.C., AD or A.D. | Era indicator Before Christ (BC) and After Christ (AD) |
| Month | MONTH | Full spelling of a month in uppercase (9 characters are filled in if the value is empty.) |
| | MON | Month in abbreviated format in uppercase (with three characters) |
| | MM | Month (01-12) |
| | RM | Month in Roman numerals (I-XII; I=JAN) and uppercase |
| Day | DAY | Full spelling of a date in uppercase (9 characters are filled in if the value is empty.) |
| | DY | Day in abbreviated format in uppercase (with three characters) |
| | DDD | Day in a year (001-366) |
| | DD | Day in a month (01-31) |
| | D | Day in a week (1-7. Sunday is 1.) |
| Week | W | Week in a month (1-5) (The first week starts from the first day of the month.) |
| | WW | Week in a year (1-53) (The first week starts from the first day of the year.) |
| | IW | Week in an ISO year (The first Thursday is in the first week.) |
| Century | CC | Century (with two digits) (The 21st century starts from 2001-01-01.) |
| Julian date | J | Julian date (starting from January 1 of 4712 BC) |
| Quarter | Q | Quarter |
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>In the table, the rules for RR to calculate years are as follows: +>- If the range of the input two-digit year is between 00 and 49: +> If the last two digits of the current year are between 00 and 49, the first two digits of the returned year are the same as the first two digits of the current year. +> If the last two digits of the current year are between 50 and 99, the first two digits of the returned year equal to the first two digits of the current year plus 1. +>- If the range of the input two-digit year is between 50 and 99: +> If the last two digits of the current year are between 00 and 49, the first two digits of the returned year equal to the first two digits of the current year minus 1. +> If the last two digits of the current year are between 50 and 99, the first two digits of the returned year are the same as the first two digits of the current year. + diff --git a/content/en/docs/Developerguide/date-time-types.md b/content/en/docs/Developerguide/date-time-types.md new file mode 100644 index 000000000..beb583977 --- /dev/null +++ b/content/en/docs/Developerguide/date-time-types.md @@ -0,0 +1,616 @@ +# Date/Time Types + +[Table 1](#en-us_topic_0237121952_en-us_topic_0059779229_en-us_topic_0058965827_table60826369) lists the date/time types supported by openGauss. For the operators and built-in functions of the types, see [Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md). + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If the time format of another database is different from that of openGauss, modify the value of the **DateStyle** parameter to keep them consistent. + +**Table 1** Date/Time types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Name | Description | Storage Space |
| --- | --- | --- |
| DATE | Specifies the date and time. | 4 bytes (The actual storage space is 8 bytes.) |
| TIME [(p)] [WITHOUT TIME ZONE] | Specifies time within one day. p indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes |
| TIME [(p)] [WITH TIME ZONE] | Specifies time within one day (with time zone). p indicates the precision after the decimal point. The value ranges from 0 to 6. | 12 bytes |
| TIMESTAMP[(p)] [WITHOUT TIME ZONE] | Specifies the date and time. p indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes |
| TIMESTAMP[(p)][WITH TIME ZONE] | Specifies the date and time (with time zone). TIMESTAMP is also called TIMESTAMPTZ. p indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes |
| SMALLDATETIME | Date and time (without time zone). The precision level is minute. A duration between 30s and 60s is rounded into 1 minute. | 8 bytes |
| INTERVAL DAY (l) TO SECOND (p) | Specifies the time interval (X days X hours X minutes X seconds).<br>- l: indicates the precision of days. The value ranges from 0 to 6. For compatibility, the precision functions are not supported.<br>- p: indicates the precision of seconds. The value ranges from 0 to 6. The digit 0 at the end of a decimal number is not displayed. | 16 bytes |
| INTERVAL [FIELDS] [ (p) ] | Time interval.<br>- fields: YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, DAY TO HOUR, DAY TO MINUTE, DAY TO SECOND, HOUR TO MINUTE, HOUR TO SECOND, and MINUTE TO SECOND.<br>- p: indicates the precision of seconds. The value ranges from 0 to 6. p takes effect only when fields are SECOND, DAY TO SECOND, HOUR TO SECOND, or MINUTE TO SECOND. The digit 0 at the end of a decimal number is not displayed. | 12 bytes |
| reltime | Relative time interval. Command syntax: X years X mons X days XX:XX:XX.<br>The Julian calendar is used. It specifies that a year has 365.25 days and a month has 30 days. The relative time interval needs to be calculated based on the input value. The output format is POSTGRES. | 4 bytes |
+ +Example: + +``` +-- Create a table. +postgres=# CREATE TABLE date_type_tab(coll date); + +-- Insert data. +postgres=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); + +-- View data. +postgres=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 +(1 row) + +-- Delete the table. +postgres=# DROP TABLE date_type_tab; + +-- Create a table. +postgres=# CREATE TABLE time_type_tab (da time without time zone ,dai time with time zone,dfgh timestamp without time zone,dfga timestamp with time zone, vbg smalldatetime); + +-- Insert data. +postgres=# INSERT INTO time_type_tab VALUES ('21:21:21','21:21:21 pst','2010-12-12','2013-12-11 pst','2003-04-12 04:05:06'); + +-- View data. +postgres=# SELECT * FROM time_type_tab; + da | dai | dfgh | dfga | vbg +----------+-------------+---------------------+------------------------+--------------------- + 21:21:21 | 21:21:21-08 | 2010-12-12 00:00:00 | 2013-12-11 16:00:00+08 | 2003-04-12 04:05:00 +(1 row) + +-- Delete the table. +postgres=# DROP TABLE time_type_tab; + +-- Create a table. +postgres=# CREATE TABLE day_type_tab (a int,b INTERVAL DAY(3) TO SECOND (4)); + +-- Insert data. +postgres=# INSERT INTO day_type_tab VALUES (1, INTERVAL '3' DAY); + +-- View data. +postgres=# SELECT * FROM day_type_tab; + a | b +---+-------- + 1 | 3 days +(1 row) + +-- Delete the table. +postgres=# DROP TABLE day_type_tab; + +-- Create a table. +postgres=# CREATE TABLE year_type_tab(a int, b interval year (6)); + +-- Insert data. +postgres=# INSERT INTO year_type_tab VALUES(1,interval '2' year); + +-- View data. +postgres=# SELECT * FROM year_type_tab; + a | b +---+--------- + 1 | 2 years +(1 row) + +-- Delete the table. +postgres=# DROP TABLE year_type_tab; +``` + +## Date Input + +Date and time input is accepted in almost any reasonable formats, including ISO 8601, SQL-compatible, and traditional POSTGRES. The system allows you to customize the sequence of day, month, and year in the date input. Set the **DateStyle** parameter to **MDY** to select month-day-year interpretation, **DMY** to select day-month-year interpretation, or **YMD** to select year-month-day interpretation. + +Remember that any date or time literal input needs to be enclosed with single quotes, and the syntax is as follows: + +type \[ \( p \) \] 'value' + +The **p** that can be selected in the precision statement is an integer, indicating the number of fractional digits in the **seconds** column. [Table 2](#en-us_topic_0237121952_en-us_topic_0059779229_t0606b0d8bec74d34a6440ef1de8d1e67) shows some possible inputs for the **date** type. + +**Table 2** Date input + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Example | Description |
| --- | --- |
| 1999-01-08 | ISO 8601 (recommended format). January 8, 1999 in any mode |
| January 8, 1999 | Unambiguous in any date input mode |
| 1/8/1999 | January 8 in MDY mode. August 1 in DMY mode |
| 1/18/1999 | January 18 in MDY mode, rejected in other modes |
| 01/02/03 | - January 2, 2003 in MDY mode<br>- February 1, 2003 in DMY mode<br>- February 3, 2001 in YMD mode |
| 1999-Jan-08 | January 8 in any mode |
| Jan-08-1999 | January 8 in any mode |
| 08-Jan-1999 | January 8 in any mode |
| 99-Jan-08 | January 8 in YMD mode, else error |
| 08-Jan-99 | January 8, except error in YMD mode |
| Jan-08-99 | January 8, except error in YMD mode |
| 19990108 | ISO 8601. January 8, 1999 in any mode |
| 990108 | ISO 8601. January 8, 1999 in any mode |
| 1999.008 | Year and day of year |
| J2451187 | Julian date |
| January 8, 99 BC | Year 99 BC |
+ +Example: + +``` +-- Create a table. +postgres=# CREATE TABLE date_type_tab(coll date); + +-- Insert data. +postgres=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); + +-- View data. +postgres=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 +(1 row) + +-- View the date format. +postgres=# SHOW datestyle; + DateStyle +----------- + ISO, MDY +(1 row) + +-- Set the date format. +postgres=# SET datestyle='YMD'; +SET + +-- Insert data. +postgres=# INSERT INTO date_type_tab VALUES(date '2010-12-11'); + +-- View data. +postgres=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 + 2010-12-11 00:00:00 +(2 rows) + +-- Delete the table. +postgres=# DROP TABLE date_type_tab; +``` + +## Date + +The time-of-day types are **TIME \[\(p\)\] \[WITHOUT TIME ZONE\]** and **TIME \[\(p\)\] \[WITH TIME ZONE\]**. **TIME** alone is equivalent to **TIME WITHOUT TIME ZONE**. + +If a time zone is specified in the input for **TIME WITHOUT TIME ZONE**, it is silently ignored. + +For details about the time input types, see [Table 3](#en-us_topic_0237121952_en-us_topic_0059779229_tc5d1089552ca4fb2a9f5ba27767a26b6). For details about time zone input types, see [Table 4](#en-us_topic_0237121952_en-us_topic_0059779229_te78a582bdc984cd3b5ecac5502f7793e). + +**Table 3** Time input + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Example | Description |
| ------- | ----------- |
| 05:06.8 | ISO 8601 |
| 4:05:06 | ISO 8601 |
| 4:05 | ISO 8601 |
| 40506 | ISO 8601 |
| 4:05 AM | Same as 04:05. AM does not affect the value. |
| 4:05 PM | Same as 16:05. Input hours must be less than or equal to 12. |
| 04:05:06.789-8 | ISO 8601 |
| 04:05:06-08:00 | ISO 8601 |
| 04:05-08:00 | ISO 8601 |
| 040506-08 | ISO 8601 |
| 04:05:06 PST | Time zone specified by abbreviation |
| 2003-04-12 04:05:06 America/New_York | Time zone specified by full name |
+ +**Table 4** Time zone input + + + + + + + + + + + + + + + + + + + + + + +

| Example | Description |
| ------- | ----------- |
| PST | Abbreviation (for Pacific Standard Time) |
| America/New_York | Full time zone name |
| -8:00 | ISO-8601 offset for PST |
| -800 | ISO-8601 offset for PST |
| -8 | ISO-8601 offset for PST |
+ +Example: + +``` +postgres=# SELECT time '04:05:06'; + time +---------- + 04:05:06 +(1 row) + +postgres=# SELECT time '04:05:06 PST'; + time +---------- + 04:05:06 +(1 row) + +postgres=# SELECT time with time zone '04:05:06 PST'; + timetz +------------- + 04:05:06-08 +(1 row) +``` + +## Special Values + +The special values supported by openGauss are converted to common date/time values when being read. For details, see [Table 5](#en-us_topic_0237121952_en-us_topic_0059779229_t8366745d681748c28d5a76843c7f0d4b). + +**Table 5** Special values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Input String | Applicable Type | Description |
| ------------ | --------------- | ----------- |
| epoch | date, timestamp | 1970-01-01 00:00:00+00 (Unix system time zero) |
| infinity | timestamp | Later than all other timestamps |
| -infinity | timestamp | Earlier than all other timestamps |
| now | date, time, timestamp | Start time of the current transaction |
| today | date, timestamp | Midnight today |
| tomorrow | date, timestamp | Midnight tomorrow |
| yesterday | date, timestamp | Midnight yesterday |
| allballs | time | 00:00:00.00 UTC |
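
The following statements can be used to check how these literals resolve in a session \(output is omitted here because values such as **now**, **today**, **tomorrow**, and **yesterday** depend on the current date and transaction\):

```
postgres=# SELECT date 'today', date 'tomorrow', date 'yesterday';
postgres=# SELECT timestamp 'epoch', timestamp 'infinity', timestamp '-infinity';
postgres=# SELECT time 'allballs';
```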
+ +## Interval Input + +The input of **reltime** can be any valid interval in TEXT format. It can be a number \(negative numbers and decimals are also allowed\) or a specific time, which must be in SQL standard format, ISO-8601 format, or POSTGRES format. In addition, the text input needs to be enclosed with single quotation marks \(''\). + +For details, see [Interval input](#en-us_topic_0237121952_table1747116463276). + +**Table 6** Interval input + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Input Example | Output | Description |
| ------------- | ------ | ----------- |
| 60 | 2 mons | Numbers are used to indicate intervals. The default unit is day. Decimals and negative numbers are also allowed; a negative number indicates an interval in the past. |
| 31.25 | 1 mons 1 days 06:00:00 | Same as above |
| -365 | -12 mons -5 days | Same as above |
| 1 years 1 mons 8 days 12:00:00 | 1 years 1 mons 8 days 12:00:00 | Intervals are in POSTGRES format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input. |
| -13 months -10 hours | -1 years -25 days -04:00:00 | Same as above |
| -2 YEARS +5 MONTHS 10 DAYS | -1 years -6 mons -25 days -06:00:00 | Same as above |
| P-1.1Y10M | -3 mons -5 days -06:00:00 | Intervals are in ISO-8601 format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input. |
| -12H | -12:00:00 | Same as above |
+ +Example: + +``` +-- Create a table. +postgres=# CREATE TABLE reltime_type_tab(col1 character(30), col2 reltime); + +-- Insert data. +postgres=# INSERT INTO reltime_type_tab VALUES ('90', '90'); +postgres=# INSERT INTO reltime_type_tab VALUES ('-366', '-366'); +postgres=# INSERT INTO reltime_type_tab VALUES ('1975.25', '1975.25'); +postgres=# INSERT INTO reltime_type_tab VALUES ('-2 YEARS +5 MONTHS 10 DAYS', '-2 YEARS +5 MONTHS 10 DAYS'); +postgres=# INSERT INTO reltime_type_tab VALUES ('30 DAYS 12:00:00', '30 DAYS 12:00:00'); +postgres=# INSERT INTO reltime_type_tab VALUES ('P-1.1Y10M', 'P-1.1Y10M'); + +-- View data. +postgres=# SELECT * FROM reltime_type_tab; + col1 | col2 +--------------------------------+------------------------------------- + 1975.25 | 5 years 4 mons 29 days + -2 YEARS +5 MONTHS 10 DAYS | -1 years -6 mons -25 days -06:00:00 + P-1.1Y10M | -3 mons -5 days -06:00:00 + -366 | -1 years -18:00:00 + 90 | 3 mons + 30 DAYS 12:00:00 | 1 mon 12:00:00 +(6 rows) + +-- Delete the table. +postgres=# DROP TABLE reltime_type_tab; +``` + diff --git a/content/en/docs/Developerguide/dbe_perf-schema.md b/content/en/docs/Developerguide/dbe_perf-schema.md new file mode 100644 index 000000000..36b2bc5b6 --- /dev/null +++ b/content/en/docs/Developerguide/dbe_perf-schema.md @@ -0,0 +1,41 @@ +# DBE\_PERF Schema + +In the **DBE\_PERF** schema, views are used to diagnose performance issues and are also the data source of WDR snapshots. After the database is installed, only the initial user and administrator have permissions for the **DBE\_PERF** schema by default. If the database is upgraded from an earlier version, permissions for the **DBE\_PERF** schema are the same as those of the earlier version to ensure forward compatibility. Organization views are divided based on multiple dimensions, such as OS, instance, and memory. These views comply with the following naming rules: + +- Views starting with **GLOBAL\_**: Request data from database nodes and return the data without processing them. +- Views starting with **SUMMARY\_**: Summarize data in openGauss. In most cases, data from database nodes \(sometimes only the primary database node\) is processed, collected, and returned. +- Views that do not start with **GLOBAL\_** or **SUMMARY\_**: Local views that do not request data from other database nodes. + +- **[OS](os.md)** + +- **[Instance](instance.md)** + +- **[Memory](memory-24.md)** + +- **[File](file.md)** + +- **[Object](object.md)** + +- **[Workload](workload.md)** + +- **[Session/Thread](session-thread.md)** + +- **[Transaction](transaction.md)** + +- **[Query](query.md)** + +- **[Cache/IO](cache-io.md)** + +- **[Utility](utility.md)** + +- **[Lock](lock-25.md)** + +- **[Wait Events](wait-events.md)** + +- **[Configuration](configuration.md)** + +- **[Operator](operator.md)** + +- **[Workload Manager](workload-manager.md)** + + diff --git a/content/en/docs/Developerguide/dcl-syntax-overview.md b/content/en/docs/Developerguide/dcl-syntax-overview.md new file mode 100644 index 000000000..bd1b75a12 --- /dev/null +++ b/content/en/docs/Developerguide/dcl-syntax-overview.md @@ -0,0 +1,78 @@ +# DCL Syntax Overview + +Data control language \(DCL\) is used to create users and roles and set or modify database users or role rights. + +## Defining a Role + +A role is used to manage permissions. For database security, management and operation permissions can be granted to different roles. 
For details about related SQL statements, see [Table 1](#en-us_topic_0237122051_en-us_topic_0059777960_tf1770f1724d84240998305bfca259f11). + +**Table 1** SQL statements for defining a role + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a role | CREATE ROLE |
| Altering role attributes | ALTER ROLE |
| Dropping a role | DROP ROLE |
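
For quick orientation, a short example combining these statements \(the role names and password below are placeholders and must meet the password policy of your installation\):

```
-- Create a role.
postgres=# CREATE ROLE manager IDENTIFIED BY 'xxxxxxxxx';

-- Rename the role.
postgres=# ALTER ROLE manager RENAME TO team_manager;

-- Drop the role.
postgres=# DROP ROLE team_manager;
```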
+ +## Defining a User + +A user is used to log in to a database. Different permissions can be granted to users for managing data accesses and operations of the users. For details about related SQL statements, see [Table 2](#en-us_topic_0237122051_en-us_topic_0059777960_t52a128d57b274569b95a3b35f6871348). + +**Table 2** SQL statements for defining a user + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a user | CREATE USER |
| Altering user attributes | ALTER USER |
| Dropping a user | DROP USER |
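
For example \(the user name and password are placeholders and must meet the password policy of your installation\):

```
-- Create a user.
postgres=# CREATE USER jim PASSWORD 'xxxxxxxxx';

-- Lock the user account.
postgres=# ALTER USER jim ACCOUNT LOCK;

-- Drop the user and the objects it owns.
postgres=# DROP USER jim CASCADE;
```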
+ +## Granting Rights + +openGauss provides a statement for granting rights to data objects and roles. For details, see [GRANT](grant.md). + +## Revoking Rights + +openGauss provides a statement for revoking rights. For details, see [REVOKE](revoke.md). + +## Setting Default Rights + +openGauss allows users to set rights for objects that will be created. For details, see [ALTER DEFAULT PRIVILEGES](alter-default-privileges.md). + diff --git a/content/en/docs/Developerguide/ddl-syntax-overview.md b/content/en/docs/Developerguide/ddl-syntax-overview.md new file mode 100644 index 000000000..631811c06 --- /dev/null +++ b/content/en/docs/Developerguide/ddl-syntax-overview.md @@ -0,0 +1,327 @@ +# DDL Syntax Overview + +Data definition language \(DDL\) is used to define or modify an object in a database, such as a table, an index, or a view. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>openGauss does not support DDL when the primary node of the database is incomplete. For example, if the primary node of the database in openGauss is faulty, creating a database or a table will fail. + +## Defining a Database + +A database is the warehouse for organizing, storing, and managing data. Defining a database includes: creating a database, altering the database attributes, and dropping the database. For details about related SQL statements, see [Table 1](#en-us_topic_0237122049_en-us_topic_0059777960_t986073679146430a8bce8bf0ea8f3607). + +**Table 1** SQL statements for defining a database + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a database | CREATE DATABASE |
| Altering database attributes | ALTER DATABASE |
| Dropping a database | DROP DATABASE |
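
For example \(the database name is a placeholder\):

```
-- Create a database.
postgres=# CREATE DATABASE db_demo;

-- Alter a database attribute, for example, the connection limit.
postgres=# ALTER DATABASE db_demo CONNECTION LIMIT 10;

-- Drop the database.
postgres=# DROP DATABASE db_demo;
```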
+ +## Defining a Schema + +A schema is the set of a group of database objects and is used to control the access to the database objects. For details about related SQL statements, see [Table 2](#en-us_topic_0237122049_en-us_topic_0059777960_t02977f28a9564837881f110b305d7509). + +**Table 2** SQL statements for defining a schema + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a schema | CREATE SCHEMA |
| Altering schema attributes | ALTER SCHEMA |
| Dropping a schema | DROP SCHEMA |
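
For example \(the schema names are placeholders\):

```
-- Create a schema.
postgres=# CREATE SCHEMA ot;

-- Rename the schema.
postgres=# ALTER SCHEMA ot RENAME TO ot_new;

-- Drop the schema.
postgres=# DROP SCHEMA ot_new;
```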
+ +## Defining a Tablespace + +A tablespace is used to manage data objects and corresponds to a catalog on a disk. For details about related SQL statements, see [Table 3](#en-us_topic_0237122049_en-us_topic_0059777960_t9b028195c0d143f6b8fc7065af1ce2f9). + +**Table 3** SQL statements for defining a tablespace + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a tablespace | CREATE TABLESPACE |
| Altering tablespace attributes | ALTER TABLESPACE |
| Dropping a tablespace | DROP TABLESPACE |
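
For example \(the tablespace name and relative path are placeholders, and the directory must be accessible to the database\):

```
-- Create a tablespace using a relative location.
postgres=# CREATE TABLESPACE tbs_demo RELATIVE LOCATION 'tablespace/tbs_demo1';

-- Rename the tablespace.
postgres=# ALTER TABLESPACE tbs_demo RENAME TO tbs_demo_new;

-- Drop the tablespace.
postgres=# DROP TABLESPACE tbs_demo_new;
```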
+ +## Defining a Table + +A table is a special data structure in a database and is used to store data objects and relationship between data objects. For details about related SQL statements, see [Table 4](#en-us_topic_0237122049_en-us_topic_0059777960_tcd92dbef720d4b7eaa5bf7a290b98605). + +**Table 4** SQL statements for defining a table + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a table | CREATE TABLE |
| Altering table attributes | ALTER TABLE |
| Dropping a table | DROP TABLE |
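
For example \(the table and column names are placeholders\):

```
-- Create a table.
postgres=# CREATE TABLE customer_demo (c_id int PRIMARY KEY, c_name varchar(32));

-- Add a column.
postgres=# ALTER TABLE customer_demo ADD COLUMN c_city varchar(32);

-- Drop the table.
postgres=# DROP TABLE customer_demo;
```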
+ +## Defining a Partitioned Table + +A partitioned table is a logical table used to improve query performance and does not store data \(data is stored in common tables\). For details about related SQL statements, see [Table 5](#en-us_topic_0237122049_en-us_topic_0059777960_t3ec179079c524dbaae801012f990a692). + +**Table 5** SQL statements for defining a partitioned table + + + + + + + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a partitioned table | CREATE TABLE PARTITION |
| Creating a partition | ALTER TABLE PARTITION |
| Altering partitioned table attributes | ALTER TABLE PARTITION |
| Deleting a partition | ALTER TABLE PARTITION |
| Dropping a partitioned table | DROP TABLE |
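
For example, a range-partitioned table can be defined and extended as follows \(the table name and partition bounds are placeholders\):

```
-- Create a range-partitioned table.
postgres=# CREATE TABLE sales_demo
(
    sale_id   int,
    sale_date date
)
PARTITION BY RANGE (sale_date)
(
    PARTITION p2019 VALUES LESS THAN ('2020-01-01'),
    PARTITION p2020 VALUES LESS THAN ('2021-01-01')
);

-- Add a partition.
postgres=# ALTER TABLE sales_demo ADD PARTITION p2021 VALUES LESS THAN ('2022-01-01');

-- Drop the partitioned table.
postgres=# DROP TABLE sales_demo;
```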
+ +## Defining an Index + +An index indicates the sequence of values in one or more columns in a database table. It is a data structure that improves the speed of data access to specific information in a database table. For details about related SQL statements, see [Table 6](#en-us_topic_0237122049_en-us_topic_0059777960_te79920e4b7b849b7a64fb71029436d48). + +**Table 6** SQL statements for defining an index + + + + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating an index | CREATE INDEX |
| Altering index attributes | ALTER INDEX |
| Dropping an index | DROP INDEX |
| Rebuilding an index | REINDEX |
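
For example, assuming a table **customer\_demo** with a column **c\_name** exists \(all names are placeholders\):

```
-- Create an index.
postgres=# CREATE INDEX idx_customer_name ON customer_demo (c_name);

-- Rename the index.
postgres=# ALTER INDEX idx_customer_name RENAME TO idx_cust_name;

-- Rebuild the index.
postgres=# REINDEX INDEX idx_cust_name;

-- Drop the index.
postgres=# DROP INDEX idx_cust_name;
```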
+ +## Defining a Stored Procedure + +A stored procedure is a set of SQL statements for achieving specific functions and is stored in the database after compiling. Users can specify a name and provide parameters \(if necessary\) to execute the stored procedure. For details about related SQL statements, see [Table 7](#en-us_topic_0237122049_en-us_topic_0059777960_t0116270962694804b50796a5d6824f3b). + +**Table 7** SQL statements for defining a stored procedure + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a stored procedure | CREATE PROCEDURE |
| Dropping a stored procedure | DROP PROCEDURE |
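
For example \(the procedure name and parameters are placeholders\):

```
-- Create a stored procedure that adds two integers.
postgres=# CREATE OR REPLACE PROCEDURE prc_add
(
    param1 IN INTEGER,
    param2 IN INTEGER
)
AS
DECLARE
    res INTEGER;
BEGIN
    res := param1 + param2;
    RAISE INFO 'result: %', res;
END;
/

-- Invoke the stored procedure.
postgres=# CALL prc_add(2, 3);

-- Drop the stored procedure.
postgres=# DROP PROCEDURE prc_add;
```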
+ +## Defining a Function + +In openGauss, a function is similar to a stored procedure, which is a set of SQL statements. The function and stored procedure are used the same. For details about related SQL statements, see [Table 8](#en-us_topic_0237122049_en-us_topic_0059777960_tde31d523c25742e2aecc5ae8a17d561b). + +**Table 8** SQL statements for defining a function + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a function | CREATE FUNCTION |
| Altering function attributes | ALTER FUNCTION |
| Dropping a function | DROP FUNCTION |
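
For example \(the function name is a placeholder\):

```
-- Create a SQL function that adds two integers.
postgres=# CREATE FUNCTION func_add_sql(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE SQL;

-- Call the function.
postgres=# SELECT func_add_sql(1, 3);

-- Drop the function.
postgres=# DROP FUNCTION func_add_sql(integer, integer);
```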
+ +## Defining a View + +A view is a virtual table exported from one or more basic tables. It is used to control data accesses of users. [Table 9](#en-us_topic_0237122049_en-us_topic_0059777960_td65563e06b1c491892dbad9b57f3b96d) lists the related SQL statements. + +**Table 9** SQL statements for defining a view + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a view | CREATE VIEW |
| Dropping a view | DROP VIEW |
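
For example \(the view name is a placeholder\):

```
-- Create a view that lists user tables only.
postgres=# CREATE VIEW myview AS
    SELECT * FROM pg_tables WHERE tablename NOT LIKE 'pg%' AND tablename NOT LIKE 'gs%';

-- Drop the view.
postgres=# DROP VIEW myview;
```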
+ +## Defining a Cursor + +To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers pointing to context regions. With a cursor, the stored procedure can control alterations in context areas. For details, see [Table 10](#en-us_topic_0237122049_en-us_topic_0059777960_t191f977ebe0a4ab5b1348c888403e3b4). + +**Table 10** SQL statements for defining a cursor + + + + + + + + + + + + + + + + + + + +

| Description | SQL Statement |
| ----------- | ------------- |
| Creating a cursor | CURSOR |
| Moving a cursor | MOVE |
| Fetching data from a cursor | FETCH |
| Closing a cursor | CLOSE |
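
For example \(the cursor name is a placeholder; a cursor must be created and used inside a transaction block\):

```
-- Start a transaction.
postgres=# START TRANSACTION;

-- Create a cursor.
postgres=# CURSOR cursor1 FOR SELECT * FROM pg_class ORDER BY 1;

-- Skip the first two rows.
postgres=# MOVE FORWARD 2 FROM cursor1;

-- Fetch the next three rows.
postgres=# FETCH FORWARD 3 FROM cursor1;

-- Close the cursor and end the transaction.
postgres=# CLOSE cursor1;
postgres=# END;
```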
+ diff --git a/content/en/docs/Developerguide/deallocate.md b/content/en/docs/Developerguide/deallocate.md new file mode 100644 index 000000000..ad0249b17 --- /dev/null +++ b/content/en/docs/Developerguide/deallocate.md @@ -0,0 +1,33 @@ +# DEALLOCATE + +## Function + +**DEALLOCATE** deallocates a previously prepared statement. If you do not explicitly deallocate a prepared statement, it is deallocated when the session ends. + +The **PREPARE** keyword is always ignored. + +## Precautions + +None + +## Syntax + +``` +DEALLOCATE [ PREPARE ] { name | ALL }; +``` + +## Parameter Description + +- **name** + + Specifies the name of the prepared statement to be deallocated. + +- **ALL** + + Deallocates all prepared statements. + + +## Examples + +None + diff --git a/content/en/docs/Developerguide/debugging.md b/content/en/docs/Developerguide/debugging.md new file mode 100644 index 000000000..6a32bcfa4 --- /dev/null +++ b/content/en/docs/Developerguide/debugging.md @@ -0,0 +1,122 @@ +# Debugging + +## Syntax + +RAISE has the following five syntax formats: + +**Figure 1** raise\_format::= +![](figures/raise_format.png "raise_format") + +**Figure 2** raise\_condition::= +![](figures/raise_condition.png "raise_condition") + +**Figure 3** raise\_sqlstate::= +![](figures/raise_sqlstate.png "raise_sqlstate") + +**Figure 4** raise\_option::= +![](figures/raise_option.png "raise_option") + +**Figure 5** raise::= +![](figures/raise.png "raise") + +**Parameter description**: + +- The level option is used to specify the error level, that is, **DEBUG**, **LOG**, **INFO**, **NOTICE**, **WARNING**, or **EXCEPTION** \(default\). **EXCEPTION** throws an error that normally terminates the current transaction and the others only generate information at their levels. The [log\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_sc6c47ec8cc1b47e28be98dbb24b1b39a) and [client\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_s2955da1f1cb24b0aa68ddc77700233e0) parameters control whether the error messages of specific levels are reported to the client and are written to the server log. +- **format**: specifies the error message text to be reported, a format string. The format string can be appended with an expression for insertion to the message text. In a format string, **%** is replaced by the parameter value attached to format and **%%** is used to print **%**. For example: + + ``` + --v_job_id replaces % in the string. + RAISE NOTICE 'Calling cs_create_job(%)',v_job_id; + ``` + +- **option = expression**: inserts additional information to an error report. The keyword option can be **MESSAGE**, **DETAIL**, **HINT**, or **ERRCODE**, and each expression can be any string. + - **MESSAGE**: specifies the error message text. This option cannot be used in a **RAISE** statement that contains a format character string in front of **USING**. + - **DETAIL**: specifies detailed information of an error. + - **HINT**: prints hint information. + - **ERRCODE**: designates an error code \(SQLSTATE\) to a report. A condition name or a five-character SQLSTATE error code can be used. + +- **condition\_name**: specifies the condition name corresponding to the error code. +- **sqlstate**: specifies the error code. + +If neither a condition name nor an **SQLSTATE** is designated in a **RAISE EXCEPTION** command, the **RAISE EXCEPTION \(P0001\)** is used by default. If no message text is designated, the condition name or SQLSTATE is used as the message text by default. 
+ +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If the **SQLSTATE** designates an error code, the error code is not limited to a defined error code. It can be any error code containing five digits or ASCII uppercase rather than **00000**. Do not use an error code ended with three zeros because this kind of error codes are type codes and can be captured by the whole category. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>The syntax described in [Figure 5](#en-us_topic_0237122256_en-us_topic_0059777683_f6b9d7253ecad413e9ee92ba78199a6b4) does not append any parameter. This form is used only for the **EXCEPTION** statement in a **BEGIN** block so that the error can be re-processed. + +## Example + +Display error and hint information when a transaction terminates: + +``` +CREATE OR REPLACE PROCEDURE proc_raise1(user_id in integer) +AS +BEGIN +RAISE EXCEPTION 'Noexistence ID --> %',user_id USING HINT = 'Please check your user ID'; +END; +/ + +call proc_raise1(300011); + +-- Execution result: +ERROR: Noexistence ID --> 300011 +HINT: Please check your user ID +``` + +Two methods are available for setting **SQLSTATE**: + +``` +CREATE OR REPLACE PROCEDURE proc_raise2(user_id in integer) +AS +BEGIN +RAISE 'Duplicate user ID: %',user_id USING ERRCODE = 'unique_violation'; +END; +/ + +\set VERBOSITY verbose +call proc_raise2(300011); + +-- Execution result: +ERROR: Duplicate user ID: 300011 +SQLSTATE: 23505 +LOCATION: exec_stmt_raise, pl_exec.cpp:3482 +``` + +If the main parameter is a condition name or **SQLSTATE**, the following applies: + +RAISE division\_by\_zero; + +RAISE SQLSTATE '22012'; + +For example: + +``` +CREATE OR REPLACE PROCEDURE division(div in integer, dividend in integer) +AS +DECLARE +res int; + BEGIN + IF dividend=0 THEN + RAISE division_by_zero; + RETURN; + ELSE + res := div/dividend; + RAISE INFO 'division result: %', res; + RETURN; + END IF; + END; +/ +call division(3,0); + +-- Execution result: +ERROR: division_by_zero +``` + +Alternatively: + +``` +RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; +``` + diff --git a/content/en/docs/Developerguide/declare-syntax.md b/content/en/docs/Developerguide/declare-syntax.md new file mode 100644 index 000000000..c04c3544c --- /dev/null +++ b/content/en/docs/Developerguide/declare-syntax.md @@ -0,0 +1,9 @@ +# DECLARE Syntax + +- **[Basic Structure](basic-structure.md)** + +- **[Anonymous Blocks](anonymous-blocks.md)** + +- **[Subprogram](subprogram.md)** + + diff --git a/content/en/docs/Developerguide/declare.md b/content/en/docs/Developerguide/declare.md new file mode 100644 index 000000000..bb1f614ad --- /dev/null +++ b/content/en/docs/Developerguide/declare.md @@ -0,0 +1,91 @@ +# DECLARE + +## Function + +**DECLARE** defines a cursor to retrieve a small number of rows at a time out of a larger query and can be the start of an anonymous block. + +This section describes usage of cursors. The usage of anonymous blocks is available in [BEGIN](begin.md). + +To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers pointing to context regions. With cursors, stored procedures can control alterations in context regions. + +Generally, **CURSOR** and **SELECT** both have text returns. Since data is stored in binary format in the system, the system needs to convert the data from the binary format to the text format. 
If data is returned in text format, client applications need to convert the data back to the binary format for processing. **FETCH** implements conversion between binary data and text data. + +## Precautions + +- **CURSOR** is used only in transaction blocks. +- Binary cursors should be used carefully. Text usually occupies larger space than binary data. A binary cursor returns internal binary data, which is easier to operate. A text cursor returns text, which is easier to retrieve and therefore reduces workload on the client. As an example, if a query returns a value of one from an integer column, you would get a string of 1 with a default cursor, whereas with a binary cursor you would get a 4-byte field containing the internal representation of the value \(in big-endian byte order\). + +## Syntax + +- Define a cursor. + + ``` + DECLARE cursor_name [ BINARY ] [ NO SCROLL ] + CURSOR [ { WITH | WITHOUT } HOLD ] FOR query ; + ``` + +- Enable an anonymous block. + + ``` + [DECLARE [declare_statements]] + BEGIN + execution_statements + END; + / + ``` + + +## Parameter Description + +- **cursor\_name** + + Specifies the name of the cursor to be created. + + Value range: a string. It must comply with the naming convention. + +- **BINARY** + + Causes the cursor to return data in binary rather than in text format. + +- **NO SCROLL** + + Specifies how the cursor retrieves rows. + + - **NO SCROLL**: specifies that the cursor cannot be used to retrieve rows in a nonsequential fashion. + - Unspecified: Based on the query's execution plan, the system automatically determines whether the cursor can be used to retrieve rows in a nonsequential fashion. + +- **WITH HOLD** + + **WITHOUT HOLD** + + Specifies whether the cursor can continue to be used after the transaction that created it successfully commits. + + - **WITH HOLD**: The cursor can continue to be used after the transaction that created it successfully commits. + - **WITHOUT HOLD**: The cursor cannot be used outside of the transaction that created it. + - If neither **WITH HOLD** nor **WITHOUT HOLD** is specified, the default is **WITHOUT HOLD**. + +- **query** + + Uses a **SELECT** or **VALUES** clause to specify the rows to be returned by the cursor. + + Value range: **SELECT** or **VALUES** clause + +- **declare\_statements** + + Declares a variable, including its name and type, for example, **sales\_cnt int**. + +- **execution\_statements** + + Specifies the statement to be executed in an anonymous block. + + Value range: an existing function name + + +## Examples + +For details about how to define a cursor, see [Examples](fetch.md#en-us_topic_0237122165_en-us_topic_0059778422_s1ee72832a27547e4949061a010e24578) in **FETCH**. + +## Helpful Links + +[BEGIN](begin.md) and [FETCH](fetch.md) + diff --git a/content/en/docs/Developerguide/deep-copy.md b/content/en/docs/Developerguide/deep-copy.md new file mode 100644 index 000000000..80ba612d4 --- /dev/null +++ b/content/en/docs/Developerguide/deep-copy.md @@ -0,0 +1,13 @@ +# Deep Copy + +After data is imported, you can perform a deep copy to modify a partition key, change a row-store table to a column-store table, or add a partial cluster key. A deep copy re-creates a table and batch inserts data into the table. + +openGauss provides three deep copy methods. 
+ +- **[Performing a Deep Copy by Using the CREATE TABLE Statement](performing-a-deep-copy-by-using-the-create-table-statement.md)** + +- **[Performing a Deep Copy by Using the CREATE TABLE LIKE Statement](performing-a-deep-copy-by-using-the-create-table-like-statement.md)** + +- **[Performing a Deep Copy by Creating a Temporary Table and Truncating the Original Table](performing-a-deep-copy-by-creating-a-temporary-table-and-truncating-the-original-table.md)** + + diff --git a/content/en/docs/Developerguide/default-mot-conf.md b/content/en/docs/Developerguide/default-mot-conf.md new file mode 100644 index 000000000..e89cc1445 --- /dev/null +++ b/content/en/docs/Developerguide/default-mot-conf.md @@ -0,0 +1,17 @@ +# Default MOT.conf + +The minimum settings and configuration specify to point the **Postgresql.conf** file to the location of the **MOT.conf** file: + +``` +Postgresql.conf +mot_config_file = '/tmp/gauss/ MOT.conf' +``` + +``` + +``` + +Ensure that the value of the max\_process\_memory setting is sufficient to include the global \(data and index\) and local \(sessions\) memory of MOT tables + +The default content of** MOT.conf **is sufficient to get started. The settings can be optimized later. + diff --git a/content/en/docs/Developerguide/default-permission-mechanism.md b/content/en/docs/Developerguide/default-permission-mechanism.md new file mode 100644 index 000000000..706ca1149 --- /dev/null +++ b/content/en/docs/Developerguide/default-permission-mechanism.md @@ -0,0 +1,14 @@ +# Default Permission Mechanism + +A user who creates an object is the owner of this object. By default, [Separation of Duties](separation-of-duties.md) is disabled after openGauss installation. A database system administrator has the same permissions as object owners. After an object is created, only the object owner or system administrator can query, modify, and delete the object, and grant permissions for the object to other users through [GRANT](grant.md) by default. + +To enable another user to use the object, grant required permissions to the user or the role that contains the user. + +openGauss supports the following permissions: **SELECT**, **INSERT**, **UPDATE**, **DELETE**, **TRUNCATE**, **REFERENCES**, **CREATE**, **CONNECT**, **EXECUTE**, and **USAGE**. Permission types are associated with object types. For permission details, see [GRANT](grant.md). + +To remove permissions, run **[REVOKE](revoke.md)**. Object owner permissions such as **ALTER**, **DROP**, **GRANT**, and **REVOKE** are implicit and cannot be granted or revoked. That is, you have the implicit permissions for an object if you are the owner of the object. Object owners can remove their own common permissions, for example, making tables read-only to themselves or others. + +System catalogs and views are visible to either system administrators or all users. System catalogs and views that require system administrator permissions can be queried only by system administrators. For details, see [System Catalogs and System Views](system-catalogs-and-system-views.md). + +The database provides the object isolation feature. If this feature is enabled, users can view only the objects \(tables, views, columns, and functions\) that they have the permission to access. System administrators are not affected by this feature. For details, see [ALTER DATABASE](alter-database.md). 
+ diff --git a/content/en/docs/Developerguide/default-settings-of-client-connection.md b/content/en/docs/Developerguide/default-settings-of-client-connection.md new file mode 100644 index 000000000..40e6ec7e2 --- /dev/null +++ b/content/en/docs/Developerguide/default-settings-of-client-connection.md @@ -0,0 +1,9 @@ +# Default Settings of Client Connection + +- **[Statement Behavior](statement-behavior.md)** + +- **[Zone and Formatting](zone-and-formatting.md)** + +- **[Other Default Parameters](other-default-parameters.md)** + + diff --git a/content/en/docs/Developerguide/define-variable.md b/content/en/docs/Developerguide/define-variable.md new file mode 100644 index 000000000..085348488 --- /dev/null +++ b/content/en/docs/Developerguide/define-variable.md @@ -0,0 +1,56 @@ +# Define Variable + +This section describes the declaration of variables in the PL/SQL and the scope of this variable in codes. + +## Variable Declaration + +For details about the variable declaration syntax, see [Figure 1](#en-us_topic_0237122221_en-us_topic_0059777427_f6cc941e0c136457aade3860fc682cbbc). + +**Figure 1** declare\_variable::= +![](figures/declare_variable.png "declare_variable") + +The above syntax diagram is explained as follows: + +- **variable\_name** indicates the name of a variable. +- **type** indicates the type of a variable. +- **value** indicates the initial value of the variable. \(If the initial value is not given, NULL is taken as the initial value.\) **value** can also be an expression. + +**Examples** + +``` +postgres=# DECLARE + emp_id INTEGER := 7788; -- Define a variable and assign a value to it. +BEGIN + emp_id := 5*7784; -- Assign a value to the variable. +END; +/ +``` + +In addition to the declaration of basic variable types, **%TYPE** and **%ROWTYPE** can be used to declare variables related to table columns or table structures. + +**%TYPE Attribute** + +**%TYPE** declares a variable to be of the same data type as a previously declared variable \(for example, a column in a table\). For example, if you want to define a _my\_name_ variable whose data type is the same as the data type of the **firstname** column in the **employee** table, you can define the variable as follows: + +``` +my_name employee.firstname%TYPE +``` + +In this way, you can declare _my\_name_ without the need of knowing the data type of **firstname** in **employee**, and the data type of _my\_name_ can be automatically updated when the data type of **firstname** changes. + +**%ROWTYPE Attribute** + +**%ROWTYPE** declares data types of a set of data. It stores a row of table data or results fetched from a cursor. For example, if you want to define a set of data with the same column names and column data types as the **employee** table, you can define the data as follows: + +``` +my_employee employee%ROWTYPE +``` + +## Scope of a Variable + +The scope of a variable indicates the accessibility and availability of the variable in code block. In other words, a variable takes effect only within its scope. + +- To define a function scope, a variable must declare and create a **BEGIN-END** block in the declaration section. The necessity of such declaration is also determined by block structure, which requires that a variable has different scopes and lifetime during a process. +- A variable can be defined multiple times in different scopes, and inner definition can cover outer one. +- A variable defined in an outer block can also be used in a nested block. 
However, the outer block cannot access variables in the nested block. + diff --git a/content/en/docs/Developerguide/delete.md b/content/en/docs/Developerguide/delete.md new file mode 100644 index 000000000..0515144f6 --- /dev/null +++ b/content/en/docs/Developerguide/delete.md @@ -0,0 +1,106 @@ +# DELETE + +## Function + +**DELETE** deletes rows that satisfy the **WHERE** clause from the specified table. If the **WHERE** clause is absent, the effect is to delete all rows in the table. The result is a valid, but an empty table. + +## Precautions + +- You must have the **DELETE** permission on the table to delete from it, as well as the **SELECT** permission for any table in the **USING** clause or whose values are read in the **condition**. +- For row-store tables, the **DELETE** operation can be used only when they have primary key constraints. +- For column-store tables, the **RETURNING** clause is currently not supported. + +## Syntax + +``` +[ WITH [ RECURSIVE ] with_query [, ...] ] +DELETE FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ] + [ USING using_list ] + [ WHERE condition | WHERE CURRENT OF cursor_name ] + [ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ]; +``` + +## Parameter Description + +- **WITH \[ RECURSIVE \] with\_query \[, ...\]** + + Specifies one or more subqueries that can be referenced by name in the main query, which is equivalent to a temporary table. + + If **RECURSIVE** is specified, it allows a **SELECT** subquery to reference itself by name. + + Format of **with\_query**: + + with\_query\_name \[ \( column\_name \[, ...\] \) \] AS + + \( \{select | values | insert | update | delete\} \) + + -- **with\_query\_name** specifies the name of the result set generated by a subquery. Such names can be used to access the subquery + + result set. + + – **column\_name** specifies the column name displayed in the subquery result set. + + – Each subquery can be a **SELECT**, **VALUES**, **INSERT**, **UPDATE** or **DELETE** statement. + +- **ONLY** + + If **ONLY** is specified before the table name, matching rows are deleted from the named table only. If **ONLY** is not specified, matching rows are also deleted from any tables inheriting from the named table. + +- **table\_name** + + Specifies the name \(optionally schema-qualified\) of the target table. + + Value range: an existing table name + +- **alias** + + Specifies a substitute name for the target table. + + Value range: a string. It must comply with the naming convention rule. + +- **using\_list** + + Specifies the **USING** clause. + +- **condition** + + Specifies an expression that returns a Boolean value. Only rows for which this expression returns **true** will be deleted. + +- **WHERE CURRENT OF cursor\_name** + + This parameter is reserved. + +- **output\_expr** + + Specifies an expression to be computed and returned by the **DELETE** statement after each row is deleted. The expression can use any column names of the table. Write **\*** to return all columns. + +- **output\_name** + + Specifies a name to use for a returned column. + + Value range: a string. It must comply with the naming convention rule. + + +## Examples + +``` +-- Create the tpcds.customer_address_bak table. +postgres=# CREATE TABLE tpcds.customer_address_bak AS TABLE tpcds.customer_address; + +-- Delete employees whose ca_address_sk is smaller than 14888 from the tpcds.customer_address_bak table. 
+postgres=# DELETE FROM tpcds.customer_address_bak WHERE ca_address_sk < 14888; + +-- Delete all data from the tpcds.customer_address_bak table. +postgres=# DELETE FROM tpcds.customer_address_bak; + +Delete the tpcds.customer_address_bak table. +postgres=# DROP TABLE tpcds.customer_address_bak; +``` + +## Suggestions + +- delete + + To delete all records in a table, use the **truncate** syntax. + + diff --git a/content/en/docs/Developerguide/deleting-data-from-a-table.md b/content/en/docs/Developerguide/deleting-data-from-a-table.md new file mode 100644 index 000000000..8664f0336 --- /dev/null +++ b/content/en/docs/Developerguide/deleting-data-from-a-table.md @@ -0,0 +1,32 @@ +# Deleting Data from a Table + +Outdated data may need to be deleted when tables are used. Data can be deleted from tables only by row. + +SQL statements can only access and delete an independent row by declaring conditions that match the row. If a table has a primary key, you can use it to specify a row. You can delete several rows that match the specified condition or delete all the rows from a table. + +For example, to delete all the rows whose **c\_customer\_sk** column is **3869** from the table **customer\_t1**, run the following command: + +``` +postgres=# DELETE FROM customer_t1 WHERE c_customer_sk = 3869; +``` + +To delete all rows from the table, run either of the following commands: + +``` +postgres=# DELETE FROM customer_t1; +``` + +``` +or, +postgres=# TRUNCATE TABLE customer_t1; +``` + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If you need to delete an entire table, you are advised to use the **TRUNCATE** statement rather than **DELETE**. + +To delete a table, run the following command: + +``` +postgres=# DROP TABLE customer_t1; +``` + diff --git a/content/en/docs/Developerguide/deployment.md b/content/en/docs/Developerguide/deployment.md new file mode 100644 index 000000000..5c8414b2f --- /dev/null +++ b/content/en/docs/Developerguide/deployment.md @@ -0,0 +1,11 @@ +# Deployment + +The following sections describe various mandatory and optional settings for optimal deployment. + +- **[Server Optimization – x86](server-optimization-x86.md)** + +- **[Server Optimization – ARM Huawei Taishan 4P](server-optimization-arm-huawei-taishan-4p.md)** + +- **[MOT Configuration Settings](mot-configuration-settings.md)** + + diff --git a/content/en/docs/Developerguide/description.md b/content/en/docs/Developerguide/description.md new file mode 100644 index 000000000..2782bc39d --- /dev/null +++ b/content/en/docs/Developerguide/description.md @@ -0,0 +1,99 @@ +# Description + +As described in [Overview](overview-16.md), **EXPLAIN** displays the execution plan, but will not actually run SQL statements. **EXPLAIN ANALYZE** and **EXPLAIN PERFORMANCE** both will actually run SQL statements and return the execution information. This section describes the execution plan and execution information in detail. + +## Execution Plans + +The following SQL statement is used as an example: + +``` +SELECT * FROM t1, t2 WHERE t1.c1 = t2.c2; +``` + +Run the **EXPLAIN** command and the output is as follows: + +![](figures/en-us_image_0252663634.png) + +**Interpretation of the execution plan level \(vertical\)**: + +1. Layer 1: **Seq Scan on t2** + + The table scan operator scans the table **t2** using **Seq Scan**. At this layer, data in the table **t2** is read from a buffer or disk, and then transferred to the upper-layer node for calculation. + +2. Layer 2: **Hash** + + Hash operator. 
It is used to calculate the hash value of the operator transferred from the lower layer for subsequent hash join operations. + +3. Layer 3: **Seq Scan on t1** + + The table scan operator scans the table **t1** using **Seq Scan**. At this layer, data in the table **t1** is read from a buffer or disk, and then transferred to the upper-layer node for hash join calculation. + +4. Layer 4: **Hash Join** + + Join operator. It is used to join data in the **t1** and **t2** tables using the hash join method and output the result data. + + +**Keywords in the execution plan**: + +1. Table access modes + - Seq Scan + + Scans all rows of the table in sequence. + + - Index Scan + + The optimizer uses a two-step plan: the child plan node visits an index to find the locations of rows matching the index condition, and then the upper plan node actually fetches those rows from the table itself. Fetching rows separately is much more expensive than reading them sequentially, but because not all pages of the table have to be visited, this is still cheaper than a sequential scan. The upper-layer planning node sorts index-identified rows based on their physical locations before reading them. This minimizes the independent capturing overhead. + + If there are separate indexes on multiple columns referenced in **WHERE**, the optimizer might choose to use an **AND** or **OR** combination of the indexes. However, this requires the visiting of both indexes, so it is not necessarily a win compared to using just one index and treating the other condition as a filter. + + The following Index scans featured with different sorting mechanisms are involved: + + - Bitmap Index Scan + + Fetches data pages using a bitmap. + + - Index Scan using index\_name + + Fetches table rows in index order, which makes them even more expensive to read. However, there are so few rows that the extra cost of sorting the row locations is unnecessary. This plan type is used mainly for queries fetching just a single row and queries having an **ORDER BY** condition that matches the index order, because no extra sorting step is needed to satisfy **ORDER BY**. + + +2. Table connection modes + - Nested Loop + + A nested loop is used for queries that have a smaller data set connected. In a nested loop join, the foreign table drives the internal table and each row returned from the foreign table should have a matching row in the internal table. The returned result set of all queries should be less than 10,000. The table that returns a smaller subset will work as a foreign table, and indexes are recommended for connection columns of the internal table. + + - \(Sonic\) Hash Join + + A hash join is used for large tables. The optimizer uses a hash join, in which rows of one table are entered into an in-memory hash table, after which the other table is scanned and the hash table is probed for matches to each row. Sonic and non-Sonic hash joins differ in their hash table structures, which do not affect the execution result set. + + - Merge Join + + In most cases, the execution performance of a merge join is lower than that of a hash join. However, if the source data has been pre-sorted and no more sorting is needed during the merge join, its performance excels. + +3. Operators + - sort + + Sorts the result set. + + - filter + + The **EXPLAIN** output shows the **WHERE** clause being applied as a **Filter** condition attached to the **Seq Scan** plan node. 
This means that the plan node checks the condition for each row it scans, and returns only the ones that meet the condition. The estimated number of output rows has been reduced because of the **WHERE** clause. However, the scan will still have to visit all 10,000 rows, as a result, the cost is not decreased. It increases a bit \(by 10,000 x **cpu\_operator\_cost**\) to reflect the extra CPU time spent on checking the **WHERE** condition. + + - LIMIT + + Limits the number of output execution results. If a **LIMIT** condition is added, not all rows are retrieved. + + + +## Execution Information + +The following SQL statement is used as an example: + +``` +select sum(t2.c1) from t1,t2 where t1.c1=t2.c2 group by t1.c2; +``` + +The output of running **EXPLAIN PERFORMANCE** is as follows: + +![](figures/en-us_image_0252660975.png) + diff --git a/content/en/docs/Developerguide/design-principles.md b/content/en/docs/Developerguide/design-principles.md new file mode 100644 index 000000000..0e8ac27f9 --- /dev/null +++ b/content/en/docs/Developerguide/design-principles.md @@ -0,0 +1,12 @@ +# Design Principles + +To achieve the requirements described above \(especially in an environment with many-cores\), our storage engine’s architecture implements the following techniques and strategies: + +- Data and indexes only reside in memory. +- Data and indexes are **not** laid out with physical partitions \(because these might achieve lower performance for certain types of applications\). +- Transaction concurrency control is based on Optimistic Concurrency Control \(OCC\) without any centralized contention points. See the ++ section for more information about OCC. +- Parallel Redo logs \(ultimately per core\) are used to efficiently avoid a central locking point. See the ++ section for more information about Parallel Redo logs. +- Indexes are lock-free. See the ++ section for more information about lock-free indexes. +- NUMA-awareness memory allocation is used to avoid cross-socket access, especially for session lifecycle objects. See the ++ section for more information about NUMA‑awareness. +- A customized MOT memory management allocator with pre-cached object pools is used to avoid expensive runtime allocation and extra points of contention. This dedicated MOT memory allocator makes memory allocation more efficient by pre‑accessing relatively large chunks of memory from the operation system as needed and then divvying it out to the MOT as needed. + diff --git a/content/en/docs/Developerguide/design.md b/content/en/docs/Developerguide/design.md new file mode 100644 index 000000000..0927c2cdd --- /dev/null +++ b/content/en/docs/Developerguide/design.md @@ -0,0 +1,18 @@ +# Design + +The key organizing principle is to eliminate unnecessary contention by reducing writes to shared memory. This variant of OCC achieves serializability, even after recovery, using CSN or periodically-updated epochs; epoch boundaries form natural serialization points. Epochs also help make garbage collection efficient and enable snapshot transactions. Several design choices, such as transaction ID design, record overwriting, and range query support, simplify and speed up transaction execution further, and the decentralized durability subsystem also avoids contention. + +SILO key technologies when running a transaction ‒ + +Transaction Private Memory for lock-less reads and writes, short locks only at commit. + +Very Low Contention. + +Low Latency. NUMA-aware local memory. + +Optimistic CC ‒ Minimal data locks, Low Contention. 
+ +Auto-Vacuum with No Locks, No Overhead. + +Masstree implementation of Btree – extremely optimized + diff --git a/content/en/docs/Developerguide/determining-the-scope-of-performance-tuning.md b/content/en/docs/Developerguide/determining-the-scope-of-performance-tuning.md new file mode 100644 index 000000000..dbda7cdef --- /dev/null +++ b/content/en/docs/Developerguide/determining-the-scope-of-performance-tuning.md @@ -0,0 +1,46 @@ +# Determining the Scope of Performance Tuning + +Database performance tuning often happens when users are not satisfied with the service execution efficiency and want to improve the efficiency. The database performance is affected by many factors as described in section [Performance Elements](#en-us_topic_0237121484_en-us_topic_0073259659_en-us_topic_0040046511_section218827915473). Therefore, performance tuning is a complex process and sometimes cannot be systematically described or explained. It depends more on the database administrator's experience. However, this section still attempts to illustrate the performance tuning methods that can be referred to by application development personnel and new openGauss database administrators. + +## Performance Elements + +There are multiple performance factors that affect the database performance. Knowing these factors can help you identify and analyze performance-associated issues. + +- System resources + + Database performance greatly relies on disk I/O and memory usage. To accurately set performance counters, you need to have a knowledge of the basic performance of the hardware deployed in openGauss. Performance of hardware, such as the CPU, hard disk, disk controller, memory, and network interfaces, greatly affects database running speed. + +- Load + + The load indicates the total database system demands and it changes over time. The overall load contains user queries, applications, concurrent jobs, transactions, and system commands transferred at any time. For example, the system load increases if multiple users are executing multiple queries. The load will significantly affect the database performance. Identifying load peak hours helps improve resource utilization so that tasks are executed effectively. + +- Throughput + + The data processing capability of a database is defined by its throughput. Database throughput is measured by the number of queries or processed transactions per second or by the average response time. The database processing capacity is closely related to the underlying system performance \(disk I/O, CPU speed, and storage bandwidth\). You need to know about the hardware performance before setting a target throughput. + +- Competition + + Competition indicates that two or more load components try to use system resources in a conflicting way. For example, competition occurs when multiple queries attempt to update the same data at the same time, or when a large number of loads compete for system resources. When competition increases, the throughput decreases. + +- Optimization + + The database optimization can affect the performance of the whole system. Before executing the SQL statements, configuring database parameters, designing tables, and performing data distribution, enable the database query optimizer can help you obtain the most efficient execution plan. + + +## Determining the Tuning Scope + +Performance tuning depends on the usage of hardware resources, such as the CPU, memory, I/O, and network of each node in openGauss. 
Check whether these resources are fully utilized, and whether any bottlenecks exist, and then perform performance tuning as required. + +- If a resource reaches the bottleneck: + 1. Check whether the key OS parameters and database parameters are properly set and perform [System Optimization](system-optimization.md). + 2. Find the resource consuming SQL statements by querying the most time-consuming SQL statements and unresponsive SQL statements, and then perform [SQL Optimization](sql-optimization.md). + +- If no resource reaches the bottleneck, the system performance can be improved. In this case, query the most time-consuming SQL statements and the unresponsive SQL statements, and then perform [SQL Optimization](sql-optimization.md) as required. + +- **[Analyzing Hardware Bottlenecks](analyzing-hardware-bottlenecks.md)** +The CPU, memory, I/O, and network resource usage of each node in openGauss are obtained to check whether these resources are fully used and whether any bottleneck exists. +- **[Querying SQL Statements That Affect Performance Most](querying-sql-statements-that-affect-performance-most.md)** + +- **[Checking Blocked Statements](checking-blocked-statements.md)** + + diff --git a/content/en/docs/Developerguide/developer-options.md b/content/en/docs/Developerguide/developer-options.md new file mode 100644 index 000000000..89ea2ed0d --- /dev/null +++ b/content/en/docs/Developerguide/developer-options.md @@ -0,0 +1,662 @@ +# Developer Options + +## allow\_system\_table\_mods + +**Parameter description**: Specifies whether the structures of system tables can be modified. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the structures of system tables can be modified. +- **off** indicates that the structures of system tables cannot be modified. + +**Default value**: **off** + +## debug\_assertions + +**Parameter description:** Specifies whether to enable various assertion checks. This parameter assists in debugging. If you are experiencing strange problems or crashes, set this parameter to **on** to identify programming defects. To use this parameter, the macro USE\_ASSERT\_CHECKING must be defined \(through the configure option **--enable-cassert**\) during the openGauss compilation. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that various assertion checks are enabled. +- **off** indicates that various assertion checks are disabled. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>This parameter is set to **on** by default if openGauss is compiled with various assertion checks enabled. + +**Default value**: **off** + +## ignore\_checksum\_failure + +**Parameter description**: Specifies whether to ignore check failures \(but still generates an alarm\) and continues reading data. Continuing reading data may result in breakdown, damaged data being transferred or hidden, failure of data recovery from remote nodes, or other serious problems. You are not advised to modify the settings. + +This parameter is a SUSET parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that data check errors are ignored. +- **off** indicates that data check errors are reported. + +**Default value**: **off** + +## enable\_force\_vector\_engine + +**Parameter description:** Specifies whether to forcibly generate vectorized execution plans for a vectorized execution operator if the operator's child node is a non-vectorized operator. When this parameter is set to **on**, vectorized execution plans are forcibly generated. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +**Default value**: **off** + +## explain\_dna\_file + +**Parameter description:** Sets [explain\_perf\_mode](#en-us_topic_0237124743_en-us_topic_0059778871_s05e1286701bc4b8d9e1c0c9aecae3a0e) to **run** to export object files in CSV format. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>The value of this parameter must be an absolute path plus a file name with the extension **.csv**. + +**Value range**: a string + +**Default value**: empty + +## explain\_perf\_mode + +**Parameter description:** Specifies the display format of the **explain** command. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** **normal**, **pretty**, **summary**, and **run** + +- **normal** indicates that the default printing format is used. +- **pretty** indicates a new format improved by using openGauss. A new format contains a plan node ID, directly and effectively analyzing performance. +- **summary** indicates that the analysis result on this information is printed in addition to the printed information in the format specified by **pretty**. +- **run** indicates that in addition to the printed information specified by **summary**, the database exports the information as a CSV file. + +**Default value**: **pretty** + +## ignore\_system\_indexes + +**Parameter description:** specifies whether to ignore system indexes when reading system tables \(but still update the indexes when modifying the tables\). + +This parameter is a BACKEND parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>This parameter is useful for recovering data from tables whose system indexes are damaged. + +**Value range**: Boolean + +- **on** indicates that system indexes are ignored. +- **off** indicates that system indexes are not ignored. + +**Default value**: **off** + +## post\_auth\_delay + +**Parameter description:** Specifies the delay in the connection to the server after a successful authentication. Developers can attach a debugger to the server startup process. + +This parameter is a BACKEND parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 2147. The unit is s. + +**Default value**: **0** + +## pre\_auth\_delay + +**Parameter description**: Specifies the period of delaying authentication after the connection to the server is started. Developers can attach a debugger to the authentication procedure. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 60. The unit is s. + +**Default value**: **0** + +## trace\_notify + +**Parameter description:** Specifies whether to generate a large amount of debugging output for the **LISTEN** and **NOTIFY** commands. The level of [client\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_s2955da1f1cb24b0aa68ddc77700233e0) or [log\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_sc6c47ec8cc1b47e28be98dbb24b1b39a) must be **DEBUG1** or lower to send such output to the client or server logs, respectively. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the function is enabled. +- **off** indicates that the function is disabled. + +**Default value**: **off** + +## trace\_recovery\_messages + +**Parameter description**: Specifies whether to enable logging of recovery-related debugging output. This parameter allows users to overwrite the normal setting of [log\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_sc6c47ec8cc1b47e28be98dbb24b1b39a), but only for specific messages. This is intended for the use in debugging the standby server. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: enumerated values. Valid values include **debug5**, **debug4**, **debug3**, **debug2**, **debug1**, and **log**. For details about the parameter values, see [log\_min\_messages](logging-time.md#en-us_topic_0237124722_en-us_topic_0059778452_sc6c47ec8cc1b47e28be98dbb24b1b39a). + +**Default value**: **log** + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>- **log** indicates that recovery-related debugging information will not be logged. +>- Except the default value **log**, each of the other values indicates that recovery-related debugging information at the specified level will also be logged. Common settings of **log\_min\_messages** will unconditionally record information into server logs. + +## trace\_sort + +**Parameter description**: Specifies whether to print information about resource usage during sorting operations. This parameter is available only when the macro TRACE\_SORT is defined during the openGauss compilation. However, TRACE\_SORT is currently defined by default. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the function is enabled. 
+- **off** indicates that the function is disabled. + +**Default value**: **off** + +## zero\_damaged\_pages + +Parameter description: Specifies whether to detect a damaged page header that causes openGauss to report an error, aborting the current transaction. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +Setting this parameter to **on** causes the system to report a warning, zero out the damaged page, and continue processing. This behavior will destroy data, including all the rows on the damaged page. However, it allows you to bypass the error and retrieve rows from any undamaged pages that may be present in the table. Therefore, it is useful for restoring data if corruption has occurred due to a hardware or software error. In most cases, you are advised not to set this parameter to **on** unless you do not want to restore data from the damaged pages of a table. + +**Default value**: **off** + +## string\_hash\_compatible + +**Parameter description:** specifies whether to use the same method to calculate char-type hash values and varchar- or text-type hash values. Based on the setting of this parameter, you can determine whether a redistribution is required when a distribution column is converted from a char-type data distribution into a varchar- or text-type data distribution. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the same calculation method is used and a redistribution is not required. +- **off** indicates that different calculation methods are used and a redistribution is required. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>Calculation methods differ in the length of input strings used for calculating hash values. \(For a char-type hash value, spaces following a string are not counted as the length. For a text- or varchar-type hash value, the spaces are counted.\) The hash value affects the calculation result of queries. To avoid query errors, do not modify this parameter during database running once it is set. + +**Default value**: **off** + +## cost\_param + +**Parameter description:** Controls use of different estimation methods in specific customer scenarios, allowing estimated values approximating to onsite values. This parameter can control various methods simultaneously by performing AND \(&\) on the bit of each method. A method is selected if the result value is not **0**. + +When **cost\_param & 1** is set to a value other than **0**, an improved mechanism is used for estimating the selection rate of non-equi-joins. This method is more accurate for estimating the selection rate of joins between two identical tables. At present, **cost\_param & 1=0** is not used. That is, a better formula is selected for calculation. + +When **cost\_param & 2** is set to a value other than **0**, the selection rate is estimated based on multiple filter criteria. The lowest selection rate among all filter criteria, but not the product of the selection rates for two tables under a specific filter criterion, is used as the total selection rate. This method is more accurate when a close correlation exists between the columns to be filtered. + +This parameter is a USERSET parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range:** an integer ranging from 1 to _INT\_MAX_ + +**Default value**: **0** + +## convert\_string\_to\_digit + +**Parameter description:** Specifies the implicit conversion priority, which determines whether to preferentially convert strings into numbers. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that strings are preferentially converted into numbers. +- **off** indicates that strings are not preferentially converted into numbers. + +**Default value**: **on** + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Modify this parameter only when absolutely necessary because the modification will change the rule for converting internal data types and may cause unexpected results. + +## nls\_timestamp\_format + +**Parameter description:** Specifies the default timestamp format. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +**Default value**: **DD-Mon-YYYY HH:MI:SS.FF AM** + +## remotetype + +**Parameter description**: Specifies the remote connection type. + +This parameter is a BACKEND parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: enumerated values. Valid values are **application**, **datanode**, and **internaltool**. + +**Default value**: **application** + +## enable\_partitionwise + +**Parameter description:** Specifies whether to select an intelligent algorithm for joining partition tables. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that an intelligent algorithm is selected. +- **off** indicates that an intelligent algorithm is not selected. + +**Default value**: **off** + +## max\_function\_args + +**Parameter description**: Specifies the maximum number of parameters allowed for a function. + +This parameter is an INTERNAL parameter. The value of this parameter cannot be modified. + +**Value range**: an integer. + +**Default value**: **666** + +## max\_user\_defined\_exception + +**Parameter description**: Specifies the maximum number of exceptions. The default value cannot be changed. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +Value range: an integer. Currently, only the fixed value **1000** is supported. + +**Default value**: **1000** + +## enable\_debug\_vacuum + +**Parameter description**: Specifies whether to allow output of some VACUUM-related logs for problem locating. This parameter is used only by developers. Common users are advised not to use it. + +This parameter is a SIGHUP parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on/true** indicates that output of VACUUM-related logs is allowed. +- **off/false** indicates that output of VACUUM-related logs is disallowed. + +**Default value**: **off** + +## enable\_global\_stats + +**Parameter description**: Specifies the current statistics collection mode, which can be global statistics collection or single-node statistics collection. By default, the global statistics collection mode is used. If this parameter is set to **off**, the statistics of the first node in the openGauss are collected by default. In this case, the quality of the generated query plan may be affected. However, the information collection performance is optimal. Therefore, exercise caution when disabling this parameter. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** or **true** indicates the global statistics mode. +- **off** or **false** indicates the database node statistics. + +**Default value**: **on** + +## enable\_fast\_numeric + +**Parameter description:** Specifies whether to enable optimization for numeric data calculation. Calculation of numeric data is time-consuming. Numeric data is converted into int64- or int128-type data to improve numeric data calculation performance. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** or **true** indicates that optimization for numeric data calculation is enabled. +- **off** or **false** indicates that optimization for numeric data calculation is disabled. + +**Default value**: **on** + +## rewrite\_rule + +**Parameter description**: Specifies the rewriting rule for enabled optional queries. Some query rewrite rules are optional. Enabling them cannot always improve the query efficiency. In a specific customer scenario, you can set the query rewriting rules through the GUC parameter to achieve optimal query efficiency. + +This parameter can control the combination of query rewriting rules, for example, there are over one override rules: rule1, rule2, rule3, and rule4. To set the parameters, you can perform the following operations: + +``` +set rewrite_rule=rule1; -- Enable query rewriting rule rule1 +set rewrite_rule=rule2, rule3; -- Enable the query rewriting rules rule2 and rule3 +set rewrite_rule=none; -- Disable all optional query rewriting rules +``` + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +- **none**: Does not use any optional query rewriting rules +- **lazyagg**: Uses the Lazy Agg query rewriting rules for eliminating aggregation operations in subqueries. + +**Default value**: **magicset** + +## enable\_compress\_spill + +**Parameter description**: Specifies whether to enable the compression function of writing data to disk. + +This parameter is a USERSET parameter. 
Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** or **true** indicates that optimization for writing data to disk is enabled. +- **off** or **false** indicates that optimization for writing data to disk is disabled. + +**Default value**: **on** + +## analysis\_options + +**Parameter description**: Specifies whether to enable function options in the corresponding options to use the corresponding location functions, including data verification and performance statistics. For details, see the options in the value range. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +- **LLVM\_COMPILE** indicates that the codegen compilation time of each thread is displayed on the explain performance page. +- **HASH\_CONFLICT** indicates that the log file in the **pg\_log** directory of the database node process displays the hash table statistics, including the hash table size, hash chain length, and hash conflict information. +- **STREAM\_DATA\_CHECK** indicates that a CRC check is performed on data before and after network data transmission. + +**Default value**: **ALL,on\(\),off\(LLVM\_COMPILE,HASH\_CONFLICT,STREAM\_DATA\_CHECK\)**, which indicates that no location function is enabled. + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Use **on\(\)** or** off\(\)** to enable or disable the functions. The function options that are not displayed retain the original values. Format for reference: +>'on\(option1, option2, ...\)' +>'off\(ALL\)' + +## resource\_track\_log + +**Parameter description**: Specifies the log level of self-diagnosis. Currently, this parameter takes effect only in multi-column statistics. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string + +- **summary**: Brief diagnosis information is displayed. +- **detail**: Detailed diagnosis information is displayed. + +Currently, the two parameter values differ only when there is an alarm about multi-column statistics not collected. If the parameter is set to **summary**, such an alarm will not be displayed. If it is set to **detail**, such an alarm will be displayed. + +**Default value**: **summary** + +## udf\_memory\_limit + +**Parameter description**: Controls the maximum physical memory that can be used when each database node executes UDFs. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer. The value range is from 200 x 1024 to _max\_process\_memory_ and the unit is KB. + +**Default value**: **200MB** + +## FencedUDFMemoryLimit + +**Parameter description**: Specifies the virtual memory used by each fenced udf worker process. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Suggestion**: You are not advised to set this parameter. 
You can set [udf\_memory\_limit](#en-us_topic_0237124743_section1765913299426) instead. + +Value range: an integer ranging from 0 KB to 2147483647 KB. The unit can also be MB or GB. **0** indicates that the memory is not limited. + +**Default value**: **0** + +## UDFWorkerMemHardLimit + +**Parameter description**: Specifies the maximum value of **fencedUDFMemoryLimit**. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Suggestion**: You are not advised to set this parameter. You can set [udf\_memory\_limit](#en-us_topic_0237124743_section1765913299426) instead. + +Value range: an integer ranging from 0KB to 2147483647KB. The unit can also be MB or GB. + +**Default value**: **1GB** + +## pljava\_vmoptions + +**Parameter description**: Specifies the startup parameters for JVMs used by the PL/Java function. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string, supporting: + +- JDK8 JVM startup parameters. For details, see JDK [official ](https://docs.oracle.com/javase/8/docs/technotes/tools/unix/java.html)descriptions. +- JDK8 JVM system attributes \(starting with **–D**, for example, **–Djava.ext.dirs**\). For details, see JDK [official ](https://docs.oracle.com/javase/tutorial/deployment/doingMoreWithRIA/properties.html)descriptions. +- User-defined parameters \(starting with **–D**, for example, **–Duser.defined.option**\). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>If **pljava\_vmoptions** is set to a value beyond the value range, an error will be reported when PL/Java functions are used. For details, see [PL/pgSQL Functions](pl-pgsql-functions.md). + +**Default value**: empty + +## enable\_pbe\_optimization + +**Parameter description**: Specifies whether the optimizer optimizes the query plan for statements executed in Parse Bind Execute \(PBE\) mode. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the optimizer optimizes the query plan. +- **off** indicates that the optimizer does not optimize the execution. + +**Default value**: **on** + +## enable\_light\_proxy + +**Parameter description**: Specifies whether the optimizer optimizes the execution of simple queries on the primary node of the databases. This parameter does not take effect if the character set of the application side does not match that of the kernel side. You are advised to set the character set to UTF8 when creating a database. + +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the optimizer optimizes the execution of simple queries on the primary node of the databases. +- **off** indicates that the optimizer does not optimize the execution. + +**Default value**: **on** + +## enable\_global\_plancache + +**Parameter description**: Specifies whether to share the cache of the PBE query execution plan. 
Enabling this function can reduce the memory usage of database nodes in high concurrency scenarios. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the execution plan of the PBE query is shared in the cache. +- **off** indicates that the execution plan of the PBE query is not shared in the cache. + +**Default value**: **off** + +## checkpoint\_flush\_after + +**Parameter description**: Specifies the number of consecutive disk pages that the checkpointer writer thread writes before asynchronous flush. In openGauss, the disk page size is 8 KB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 256. **0** indicates that the asynchronous flush function is disabled. For example, if the value is **32**, the checkpointer thread continuously writes 32 disk pages \(that is, 32 x 8 = 256 KB\) before asynchronous flush. + +**Default value**: **32** + +## bgwriter\_flush\_after + +**Parameter description**: Specifies the number of consecutive disk pages that the background writer thread writes before asynchronous flush. In openGauss, the disk page size is 8 KB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 256. **0** indicates that the asynchronous flush function is disabled. The size of a single page is 8 KB. For example, if the value is **64**, the background writer thread continuously writes 64 disk pages \(that is, 64 x 8 = 512 KB\) before asynchronous flush. + +**Default value**: **256KB** \(32 pages\) + +## backend\_flush\_after + +**Parameter description**: Specifies the number of consecutive disk pages that the backend thread writes before asynchronous flush. In openGauss, the disk page size is 8 KB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from 0 to 256. **0** indicates that the asynchronous flush function is disabled. The size of a single page is 8 KB. For example, if the value is **64**, the backend thread continuously writes 64 disk pages \(that is, 64 x 8 = 512 KB\) before asynchronous flush. + +**Default value**: **0** + +## enable\_parallel\_ddl + +**Parameter description**: Specifies whether multiple database nodes can concurrently perform DDL operations on the same database object. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on**: DDL operations can be concurrently performed without distributed deadlocks. +- **off**: Distributed deadlocks may occur when DDL operations are concurrently performed. 
+ +**Default value**: **on** + +## show\_acce\_estimate\_detail + +**Parameter description**: The evaluation information is generally used by O&M personnel during maintenance, and it may affect the output display of the **EXPLAIN** statement. Therefore, this parameter is disabled by default. The evaluation information is displayed only if the **verbose** option of the **EXPLAIN** statement is enabled. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the evaluation information is displayed in the output of the **EXPLAIN** statement. +- **off** indicates that the evaluation information is not displayed in the output of the **EXPLAIN** statement. + + +**Default value**: **off** + +## enable\_prevent\_job\_task\_startup + +**Parameter description**: Specifies whether to start the job thread. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the job thread is not started. +- **off** indicates that the job thread is started. + +**Default value**: **off** + +## enable\_early\_free + +**Parameter description**: Specifies whether the operator memory can be released in advance. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the operator memory can be released in advance. +- **off** indicates that the operator memory cannot be released in advance. + +**Default value**: **on** + +## support\_batch\_bind + +**Parameter description**: Specifies whether to batch bind and execute PBE statements through interfaces such as JDBC, ODBC, and Libpq. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that batch binding and execution are used. +- **off** indicates that batch binding and execution are not used. + +**Default value**: **on** + +## check\_implicit\_conversions + +**Parameter description**: Specifies whether to check candidate index paths generated for index columns that have implicit type conversions in a query. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that a check will be performed for candidate index paths generated for index columns that have implicit type conversion in a query. +- **off** indicates that a check will not be performed. + +**Default value**: **off** + +## enable\_thread\_pool + +**Parameter description**: Specifies whether to enable the thread pool function. This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: Boolean + +- **on** indicates that the thread pool function is enabled. 
+- **off** indicates that the thread pool function is disabled. + +**Default value**: **off** + +## thread\_pool\_attr + +**Parameter description**: Specifies the detailed attributes of the thread pool function. This parameter is valid only when **enable\_thread\_pool** is set to **on**. This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string, consisting of one or more characters. + +This parameter consists of three parts: thread\_num, group\_num, and cpubind\_info. The meanings of the three parts are as follows: + +- **thread\_num** indicates the total number of threads in the thread pool. The value ranges from 0 to 4096. The value **0** indicates that the database automatically configures the number of threads in the thread pool based on the number of CPU cores. If the value is greater than **0**, the number of threads in the thread pool is the same as the value of **thread\_num**. +- **group\_num** indicates the number of thread groups in the thread pool. The value ranges from 0 to 64. The value **0** indicates that the database automatically configures the number of thread groups in the thread pool based on the number of NUMA groups. If the value is greater than **0**, the number of thread groups in the thread pool is the same as the value of **group\_num**. +- **cpubind\_info** indicates whether the thread pool is bound to a core. The available configuration modes are as follows: 1. '\(nobind\)': The thread is not bound to a core. 2. '\(allbind\)': Use all CPU cores that can be queried in the current system to bind threads. 3. '\(nodebind: 1, 2\)': Use the CPU cores in NUMA groups 1 and 2 to bind threads. 4. '\(cpubind: 0-30\)': Use the CPU cores 0 to 30 to bind threads. This parameter is case-insensitive. + +**Default value**: **'16, 2, \(nobind\)'** + +## numa\_distribute\_mode + +**Parameter description**: Specifies the distribution of some shared data and threads among NUMA nodes. This parameter is used to optimize the performance of large-scale ARM servers with multiple NUMA nodes. Generally, you do not need to set this parameter. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: a string. The valid values are **none** and **all**. + +- **none** indicates that this function is disabled. +- **all** indicates that some shared data and threads are distributed to different NUMA nodes to reduce the number of remote access times and improve performance. Currently, this function applies only to ARM servers with multiple NUMA nodes. All NUMA nodes must be available for database processes. You cannot select only some NUMA nodes. + +**Default value**: **none** + +## log\_pagewriter + +**Parameter description**: Specifies whether to display the page refresh information of a thread and details about an incremental check point after the incremental check point is enabled. You are not advised to set this parameter to **true** because a large amount of information will be generated. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). 
**Value range**: Boolean

**Default value**: **off**

## enable\_opfusion

**Parameter description**: Specifies whether to optimize simple queries.

This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846).

**Value range**: Boolean

- **on** indicates that simple queries are optimized.
- **off** indicates that simple queries are not optimized.

**Default value**: **on**

## advance\_xlog\_file\_num

**Parameter description**: Specifies the number of Xlog files that are periodically initialized in advance in the background. This parameter is used to prevent Xlog file initialization from affecting performance during transaction commit. Such an impact occurs only when the system is overloaded, so you generally do not need to set this parameter.

This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846).

**Value range**: an integer ranging from 0 to 100. The value **0** indicates that initialization is not performed in advance. For example, the value **10** indicates that the background thread periodically initializes 10 Xlog files in advance based on the write location of the current Xlog.

**Default value**: **0**

## enable\_beta\_opfusion

**Parameter description**: Specifies whether to accelerate the execution of SQL statements such as aggregate functions, sorting, and nestloop joins in TPC-C when **enable\_opfusion** is set to **on**. For nestloop join SQL statements, **enable\_beta\_nestloop\_fusion** must also be set to **on**.

**Value range**: Boolean

**Default value**: **off**

## enable\_beta\_nestloop\_fusion

**Parameter description**: Specifies whether to accelerate the execution of nestloop join SQL statements in TPC-C when both **enable\_opfusion** and **enable\_beta\_opfusion** are set to **on**.

**Value range**: Boolean

**Default value**: **off**

diff --git a/content/en/docs/Developerguide/development-based-on-jdbc.md b/content/en/docs/Developerguide/development-based-on-jdbc.md new file mode 100644 index 000000000..792c27b02 --- /dev/null +++ b/content/en/docs/Developerguide/development-based-on-jdbc.md @@ -0,0 +1,31 @@

# Development Based on JDBC

Java Database Connectivity \(JDBC\) is a Java API for running SQL statements. It provides unified access interfaces for different relational databases, based on which applications process data. openGauss supports JDBC 4.0 and requires JDK 1.8 for compiling the application code. It does not support the JDBC-ODBC bridge.
+ +- **[JDBC Package, Driver Class, and Environment Class](jdbc-package-driver-class-and-environment-class.md)** + +- **[Development Process](development-process.md)** + +- **[Loading the Driver](loading-the-driver.md)** + +- **[Connecting to a Database](connecting-to-a-database-0.md)** + +- **[Connecting to the Database \(Using SSL\)](connecting-to-the-database-(using-ssl).md)** + +- **[Running SQL Statements](running-sql-statements.md)** + +- **[Processing Data in a Result Set](processing-data-in-a-result-set.md)** + +- **[Closing a Connection](closing-a-connection.md)** + +- **[Example: Common Operations](example-common-operations.md)** + +- **[Example: Retrying SQL Queries for Applications](example-retrying-sql-queries-for-applications.md)** + +- **[Example: Importing and Exporting Data Through Local Files](example-importing-and-exporting-data-through-local-files.md)** + +- **[Example 2: Migrating Data from a MY Database to openGauss](example-2-migrating-data-from-a-my-database-to-opengauss.md)** + +- **[JDBC Interface Reference](jdbc-interface-reference.md)** + + diff --git a/content/en/docs/Developerguide/development-based-on-libpq.md b/content/en/docs/Developerguide/development-based-on-libpq.md new file mode 100644 index 000000000..b71d6514c --- /dev/null +++ b/content/en/docs/Developerguide/development-based-on-libpq.md @@ -0,0 +1,6 @@ +# Development Based on libpq + +openGauss does not verify the use of libpq interfaces in application development. You are not advised to use this set of interfaces for application development, because underlying risks probably exist. You can use the ODBC or JDBC interface instead. + + + diff --git a/content/en/docs/Developerguide/development-based-on-odbc.md b/content/en/docs/Developerguide/development-based-on-odbc.md new file mode 100644 index 000000000..23527050f --- /dev/null +++ b/content/en/docs/Developerguide/development-based-on-odbc.md @@ -0,0 +1,61 @@ +# Development Based on ODBC + +Open Database Connectivity \(ODBC\) is a Microsoft API for accessing databases based on the X/OPEN CLI. Applications interact with the database through the APIs provided by ODBC, which enhances their portability, scalability, and maintainability. + +[Figure 1](#fig1255101034110) shows the system structure of ODBC. + +**Figure 1** ODBC system structure +![](figures/odbc-system-structure.png "odbc-system-structure") + +openGauss supports ODBC 3.5 in the following environments. + +**Table 1** OSs Supported by ODBC + + + + + + + + + + + + + + + + + + + +

| OS | Platform |
| :--- | :--- |
| CentOS 6.4/6.5/6.6/6.7/6.8/6.9/7.0/7.1/7.2/7.3/7.4 | x86_64 |
| CentOS 7.6 | ARM64 |
| EulerOS 2.0 SP2/SP3 | x86_64 |
| EulerOS 2.0 SP8 | ARM64 |
+ +The ODBC Driver Manager running on UNIX or Linux can be unixODBC or iODBC. unixODBC-2.3.0 is used as the component for connecting the database. + +Windows has a native ODBC Driver Manager. You can locate **Data Sources \(ODBC\)** by choosing **Control Panel** \> **Administrative Tools**. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>The current database ODBC driver is based on an open-source version and may be incompatible with data types tinyint, smalldatetime, and nvarchar2. + +- **[ODBC Packages, Dependent Libraries, and Header Files](odbc-packages-dependent-libraries-and-header-files.md)** + +- **[Configuring a Data Source in the Linux OS](configuring-a-data-source-in-the-linux-os.md)** + +- **[Development Process](development-process-1.md)** + +- **[Example](example.md)** + +- **[ODBC Interface Reference](odbc-interface-reference.md)** + + diff --git a/content/en/docs/Developerguide/development-process-1.md b/content/en/docs/Developerguide/development-process-1.md new file mode 100644 index 000000000..e77a5e85c --- /dev/null +++ b/content/en/docs/Developerguide/development-process-1.md @@ -0,0 +1,104 @@ +# Development Process + +**Figure 1** ODBC-based application development process +![](figures/odbc-based-application-development-process.png "odbc-based-application-development-process") + +## APIs Involved in the Development Process + +**Table 1** API description + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Function | API |
| :--- | :--- |
| Allocate a handle | SQLAllocHandle is a generic function for allocating a handle. It can replace the following functions: SQLAllocEnv, SQLAllocConnect, and SQLAllocStmt. |
| Set environment attributes | SQLSetEnvAttr |
| Set connection attributes | SQLSetConnectAttr |
| Set statement attributes | SQLSetStmtAttr |
| Connect to a data source | SQLConnect |
| Bind a buffer to a column in the result set | SQLBindCol |
| Bind the parameter marker of an SQL statement to a buffer | SQLBindParameter |
| Return the error message of the last operation | SQLGetDiagRec |
| Prepare an SQL statement for execution | SQLPrepare |
| Run a prepared SQL statement | SQLExecute |
| Run an SQL statement directly | SQLExecDirect |
| Fetch the next row (or rows) from the result set | SQLFetch |
| Return data in a column of the result set | SQLGetData |
| Get the column information from a result set | SQLColAttribute |
| Disconnect from a data source | SQLDisconnect |
| Release a handle | SQLFreeHandle is a generic function for releasing a handle. It can replace the following functions: SQLFreeEnv, SQLFreeConnect, and SQLFreeStmt. |
+ +>![](public_sys-resources/icon-note.gif) **NOTE:** +>If an execution request \(not in a transaction block\) received in the database contains multiple statements, the request is packed into a transaction. If one of the statements fails, the entire request will be rolled back. + diff --git a/content/en/docs/Developerguide/development-process.md b/content/en/docs/Developerguide/development-process.md new file mode 100644 index 000000000..554c7b22f --- /dev/null +++ b/content/en/docs/Developerguide/development-process.md @@ -0,0 +1,5 @@ +# Development Process + +**Figure 1** Application development process based on JDBC +![](figures/application-development-process-based-on-jdbc.png "application-development-process-based-on-jdbc") + diff --git a/content/en/docs/Developerguide/development-specifications.md b/content/en/docs/Developerguide/development-specifications.md new file mode 100644 index 000000000..f2795b512 --- /dev/null +++ b/content/en/docs/Developerguide/development-specifications.md @@ -0,0 +1,9 @@ +# Development Specifications + +If the connection pool mechanism is used during application development, comply with the following specifications: + +- If GUC parameters are set in the connection, run **SET SESSION AUTHORIZATION DEFAULT;RESET ALL;** to clear the connection status before you return the connection to the connection pool. +- If a temporary table is used, delete the temporary table before you return the connection to the connection pool. + +If you do not do so, the connection in the connection pool will be stateful, which affects subsequent operations on the connection pool. + diff --git a/content/en/docs/Developerguide/dictionaries.md b/content/en/docs/Developerguide/dictionaries.md new file mode 100644 index 000000000..b850b0175 --- /dev/null +++ b/content/en/docs/Developerguide/dictionaries.md @@ -0,0 +1,17 @@ +# Dictionaries + +- **[Overview](overview-19.md)** + +- **[Stop Words](stop-words.md)** + +- **[Simple Dictionary](simple-dictionary.md)** + +- **[Synonym Dictionary](synonym-dictionary.md)** + +- **[Thesaurus Dictionary](thesaurus-dictionary.md)** + +- **[Ispell Dictionary](ispell-dictionary.md)** + +- **[Snowball Dictionary](snowball-dictionary.md)** + + diff --git a/content/en/docs/Developerguide/disk-space.md b/content/en/docs/Developerguide/disk-space.md new file mode 100644 index 000000000..9bc786dc6 --- /dev/null +++ b/content/en/docs/Developerguide/disk-space.md @@ -0,0 +1,29 @@ +# Disk Space + +This section describes the disk space parameters, which are used to set limits on the disk space for storing temporary files. + +## sql\_use\_spacelimit + +**Parameter description**: Specifies the space size for files to be flushed to disks when a single SQL statement is executed on a single database node. The managed space includes the space occupied by ordinary tables, temporary tables, and intermediate result sets to be flushed to disks. + +This parameter is a USERSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +**Value range**: an integer ranging from –1 to 2147483647. The unit is KB. **–1** indicates no limit. + +**Default value:** **–1** + +## temp\_file\_limit + +**Parameter description**: Specifies the limit on the size of a temporary file spilled to disk in a session. The temporary file can be a sort or hash temporary file, or the storage file for a held cursor. + +This is a session-level setting. 
+ +This parameter is a SUSET parameter. Set it based on instructions provided in [Table 1](resetting-parameters.md#en-us_topic_0237121562_en-us_topic_0059777490_t91a6f212010f4503b24d7943aed6d846). + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>This parameter does not apply to disk space used for temporary tables during the SQL query process. + +**Value range**: an integer ranging from –1 to 2147483647. The unit is KB. **–1** indicates no limit. + +**Default value**: **–1** + diff --git a/content/en/docs/Developerguide/disk-ssd.md b/content/en/docs/Developerguide/disk-ssd.md new file mode 100644 index 000000000..bf876f244 --- /dev/null +++ b/content/en/docs/Developerguide/disk-ssd.md @@ -0,0 +1,15 @@ +# Disk/SSD + +The following describes how to ensure that disk R/W performance is suitable for database synchronous commit mode. + +To do so, test your disk bandwidth using the following: + +``` +[...]$ sync; dd if=/dev/zero of=testfile bs=1M count=1024; sync +1024+0 records in +1024+0 records out +1073741824 bytes (1.1 GB) copied, 1.36034 s, 789 MB/s +``` + +In case the disk bandwidth is significantly below the above number \(789 MB/s\), it may create a performance bottleneck for openGauss, and especially for MOT. + diff --git a/content/en/docs/Developerguide/dml-syntax-overview.md b/content/en/docs/Developerguide/dml-syntax-overview.md new file mode 100644 index 000000000..90316dc6b --- /dev/null +++ b/content/en/docs/Developerguide/dml-syntax-overview.md @@ -0,0 +1,60 @@ +# DML Syntax Overview + +Data manipulation language \(DML\) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data. + +## Inserting Data + +Inserting data refers to adding one or multiple records to a database table. For details, see [INSERT](insert.md). + +## Updating Data + +Updating data refers to modifying one or multiple records in a database table. For details, see [UPDATE](update.md). + +## Querying Data + +The database query statement **SELECT** is used to search required information in a database. For details, see [SELECT](select.md). + +## Deleting Data + +openGauss provides two statements for deleting data from database tables. To delete data meeting specified conditions from a database table, see [DELETE](delete.md). To delete all data from a database table, see [TRUNCATE](truncate.md). + +**TRUNCATE** can quickly delete all data from a database table, which achieves the effect same as that running **DELETE** to delete data without specifying conditions from each table. Deletion efficiency using **TRUNCATE** is faster because **TRUNCATE** does not scan tables. Therefore, **TRUNCATE** is useful in large tables. + +## Copying Data + +openGauss provides a statement for copying data between tables and files. For details, see [COPY](copy.md). + +## Locking a Table + +openGauss provides multiple lock modes to control concurrent accesses to table data. For details, see [LOCK](lock.md). + +## Calling a Function + +openGauss provides three statements for calling functions. These statements are the same in the syntax structure. For details, see [CALL](call.md). + +## Session Management + +A session is a connection established between the user and the database. [Table 1](#en-us_topic_0237122050_en-us_topic_0059777960_t320d7e04ba33427cbe2132b994ef6cb2) lists the related SQL statements. + +**Table 1** SQL statements related to sessions + + + + + + + + + + + + + +

| Function | SQL Statement |
| :--- | :--- |
| Altering a session | ALTER SESSION |
| Killing a session | ALTER SYSTEM KILL SESSION |
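To tie the statements in this overview together, the following is a minimal sketch that runs INSERT, UPDATE, SELECT, DELETE, and TRUNCATE in sequence. The table name **products** and its columns are hypothetical and used only for illustration; the setup and cleanup statements are included only so the block is self-contained.

```
-- Setup only: create a throwaway table so the DML below can run.
postgres=# CREATE TABLE products(id int, name varchar(100), price numeric);

-- Insert data.
postgres=# INSERT INTO products VALUES (1, 'pen', 2.5), (2, 'notebook', 8.0);

-- Update data that meets a condition.
postgres=# UPDATE products SET price = 3.0 WHERE id = 1;

-- Query data.
postgres=# SELECT id, name, price FROM products WHERE price > 2.0;

-- Delete the rows that meet a condition.
postgres=# DELETE FROM products WHERE id = 2;

-- Remove all remaining rows without scanning the table.
postgres=# TRUNCATE products;

-- Clean up.
postgres=# DROP TABLE products;
```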
+ diff --git a/content/en/docs/Developerguide/do.md b/content/en/docs/Developerguide/do.md new file mode 100644 index 000000000..ca05bbd1d --- /dev/null +++ b/content/en/docs/Developerguide/do.md @@ -0,0 +1,51 @@ +# DO + +## Function + +**DO** executes an anonymous code block. + +The code block is treated as though it were the body of a function with no parameters, returning **void**. It is parsed and executed a single time. + +## Precautions + +- he procedural language to be used must already have been installed into the current database by means of **CREATE LANGUAGE**. **plpgsql** is installed by default, but other languages are not. +- The user must have the **USAGE** permission on the procedural language, or must be a system administrator if the language is untrusted. + +## Syntax + +``` +DO [ LANGUAGE lang_name ] code; +``` + +## Parameter Description + +- **lang\_name** + + Specifies the name of the procedural language the code is written in. If omitted, the default is **plpgsql**. + +- **code** + + Specifies the procedural language code to be executed. This must be specified as a string literal. + + +## Example + +``` +-- Create the webuser user. +postgres=# CREATE USER webuser PASSWORD 'Bigdata@123'; + +-- Grant all permissions on all views in the tpcds schema to the webuser user. +postgres=# DO $$DECLARE r record; +BEGIN + FOR r IN SELECT c.relname table_name,n.nspname table_schema FROM pg_class c,pg_namespace n + WHERE c.relnamespace = n.oid AND n.nspname = 'tpcds' AND relkind IN ('r','v') + LOOP + EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; + END LOOP; +END$$; + + +-- Delete the webuser user. +postgres=# DROP USER webuser CASCADE; +``` + diff --git a/content/en/docs/Developerguide/doing-vacuum-to-a-table.md b/content/en/docs/Developerguide/doing-vacuum-to-a-table.md new file mode 100644 index 000000000..7eae44c74 --- /dev/null +++ b/content/en/docs/Developerguide/doing-vacuum-to-a-table.md @@ -0,0 +1,17 @@ +# Doing VACUUM to a Table + +If a large number of rows were updated or deleted during import, run **VACUUM FULL** before **ANALYZE**. A large number of UPDATE and DELETE operations generate huge disk page fragments, which reduces query efficiency. **VACUUM FULL** can restore disk page fragments and return them to the OS. + +1. Run the **VACUUM FULL** statement. + + Do **VACUUM FULL** to the **product\_info** table. + + ``` + postgres=# VACUUM FULL product_info + ``` + + ``` + VACUUM + ``` + + diff --git a/content/en/docs/Developerguide/drop-data-source.md b/content/en/docs/Developerguide/drop-data-source.md new file mode 100644 index 000000000..846d89acc --- /dev/null +++ b/content/en/docs/Developerguide/drop-data-source.md @@ -0,0 +1,51 @@ +# DROP DATA SOURCE + +## Function + +**DROP DATA SOURCE** deletes a data source. + +## Important Notes + +Only an owner, system administrator, or initial user can delete a data source. + +## Syntax + +``` +DROP DATA SOURCE [IF EXISTS] src_name [CASCADE | RESTRICT]; +``` + +## Parameter Description + +- **src\_name** + + Specifies the name of the data source to be deleted. + + Value range: a string. It must comply with the naming convention. + +- **IF EXISTS** + + Reports a notice instead of an error if the specified data source does not exist. + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects that depend on the data source. + - **RESTRICT**: refuses to delete the data source if any objects depend on it. This is the default action. 
Currently, no objects depend on data sources. Therefore, **CASCADE** is equivalent to **RESTRICT**, and they are reserved to ensure backward compatibility.

## Examples

```
-- Create a data source.
postgres=# CREATE DATA SOURCE ds_tst1;

-- Delete the data source.
postgres=# DROP DATA SOURCE ds_tst1 CASCADE;
postgres=# DROP DATA SOURCE IF EXISTS ds_tst1 RESTRICT;
```

## Helpful Links

[CREATE DATA SOURCE](create-data-source.md) and [DROP DATA SOURCE](drop-data-source.md)

diff --git a/content/en/docs/Developerguide/drop-database.md b/content/en/docs/Developerguide/drop-database.md new file mode 100644 index 000000000..aaffab384 --- /dev/null +++ b/content/en/docs/Developerguide/drop-database.md @@ -0,0 +1,51 @@

# DROP DATABASE

## Function

**DROP DATABASE** deletes a database.

## Precautions

- Only the owner of a database or a system administrator has the **DROP DATABASE** permission.
- The preinstalled POSTGRES, TEMPLATE0, and TEMPLATE1 databases are protected and therefore cannot be deleted. To list the databases in the current instance, run the gsql meta-command **\\l**.
- If any users are connected to the database, the database cannot be deleted.
- **DROP DATABASE** cannot be executed within a transaction block.
- If **DROP DATABASE** fails and is rolled back, run **DROP DATABASE IF EXISTS** again.

>![](public_sys-resources/icon-notice.gif) **NOTICE:**
>**DROP DATABASE** cannot be undone.

## Syntax

```
DROP DATABASE [ IF EXISTS ] database_name ;
```

## Parameter Description

- **IF EXISTS**

    Reports a notice instead of an error if the specified database does not exist.

- **database\_name**

    Specifies the name of the database to be deleted.

    Value range: an existing database name

## Examples

See [Examples](create-database.md#en-us_topic_0237122099_en-us_topic_0059778277_s6be7b8abbb4b4aceb9dae686434d672c) in **CREATE DATABASE**.

## Helpful Links

[CREATE DATABASE](create-database.md)

## Suggestions

- drop database

    Do not delete databases during transactions.

diff --git a/content/en/docs/Developerguide/drop-directory.md b/content/en/docs/Developerguide/drop-directory.md new file mode 100644 index 000000000..d53db11a2 --- /dev/null +++ b/content/en/docs/Developerguide/drop-directory.md @@ -0,0 +1,39 @@

# DROP DIRECTORY

## Function

**DROP DIRECTORY** deletes a directory.

## Precautions

By default, only the initial user can perform the drop operation. When **enable\_access\_server\_directory** \([enable\_access\_server\_directory](operation-auditing.md#en-us_topic_0237124747_section4279164545515)\) is enabled, users with the **sysadmin** permission can also perform the drop operation.

## Syntax

```
DROP DIRECTORY [ IF EXISTS ] directory_name;
```

## Parameter Description

- **directory\_name**

    Specifies the name of the directory to be deleted.

    Value range: an existing directory name

## Example

```
-- Create a directory.
postgres=# CREATE OR REPLACE DIRECTORY dir as '/tmp/';

-- Delete a directory.
postgres=# DROP DIRECTORY dir;
```

## Helpful Links

[CREATE DIRECTORY](create-directory.md) and [ALTER DIRECTORY](alter-directory.md)

diff --git a/content/en/docs/Developerguide/drop-function.md b/content/en/docs/Developerguide/drop-function.md new file mode 100644 index 000000000..5635f592c --- /dev/null +++ b/content/en/docs/Developerguide/drop-function.md @@ -0,0 +1,50 @@

# DROP FUNCTION

## Function

**DROP FUNCTION** deletes a function.
+ +## Precautions + +If a function involves operations on temporary tables, **DROP FUNCTION** cannot be used. + +## Syntax + +``` +DROP FUNCTION [ IF EXISTS ] function_name +[ ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) [ CASCADE | RESTRICT ] ]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified function does not exist. + +- **function\_name** + + Specifies the name of the function to be deleted. + + Value range: an existing function name + +- **argmode** + + Specifies the parameter mode of the function. + +- **argname** + + Specifies the parameter name of the function. + +- **argtype** + + Specifies the parameter type of the function. + + +## Examples + +For details, see [Examples](create-function.md#en-us_topic_0237122104_en-us_topic_0059778837_scc61c5d3cc3e48c1a1ef323652dda821). + +## Helpful Links + +[ALTER FUNCTION](alter-function.md) and [CREATE FUNCTION](create-function.md) + diff --git a/content/en/docs/Developerguide/drop-group.md b/content/en/docs/Developerguide/drop-group.md new file mode 100644 index 000000000..09c018b14 --- /dev/null +++ b/content/en/docs/Developerguide/drop-group.md @@ -0,0 +1,26 @@ +# DROP GROUP + +## Function + +**DROP GROUP** deletes a user group. + +**DROP GROUP** is an alias for **DROP ROLE**. + +## Precautions + +**DROP GROUP** is an internal interface of the openGauss management tool. You are not advised to use this interface, because doing so affects openGauss. + +## Syntax + +``` +DROP GROUP [ IF EXISTS ] group_name [, ...]; +``` + +## Parameter Description + +See [Parameter Description](drop-role.md#en-us_topic_0237122147_en-us_topic_0059778848_sabe550f7ed48409b8ffd1d88ca9f0725) in **DROP ROLE**. + +## Helpful Links + +[CREATE GROUP](create-group.md), [ALTER GROUP](alter-group.md), and [DROP ROLE](drop-role.md) + diff --git a/content/en/docs/Developerguide/drop-index.md b/content/en/docs/Developerguide/drop-index.md new file mode 100644 index 000000000..13211a0e0 --- /dev/null +++ b/content/en/docs/Developerguide/drop-index.md @@ -0,0 +1,50 @@ +# DROP INDEX + +## Function + +**DROP INDEX** deletes an index. + +## Precautions + +Only the owner of an index or a system administrator has the **DROP INDEX** permission. + +## Syntax + +``` +DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] + index_name [, ...] [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **CONCURRENTLY** + + Deletes an index without locking it. A normal **DROP INDEX** acquires exclusive lock on the table on which the index depends, blocking other accesses until the index drop can be completed. With this option, the statement does not lock the table during index deletion. + + This parameter allows only one index name and does not support **CASCADE**. + + The **DROP INDEX** statement can be run within a transaction, but **DROP INDEX CONCURRENTLY** cannot. + +- **IF EXISTS** + + Reports a notice instead of an error if the specified index does not exist. + +- **index\_name** + + Specifies the index name to be deleted. + + Value range: an existing index + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects that depend on the index. + - **RESTRICT**: refuses to delete the index if any objects depend on it. This is the default action. + + +## Examples + +See [Examples](create-index.md#en-us_topic_0237122106_en-us_topic_0059777455_s985289833081489e9d77c485755bd362) in **CREATE INDEX**. 
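As a minimal, self-contained sketch in addition to the examples referenced above \(the table and index names below are hypothetical and used only for illustration\):

```
-- Setup only: create a table and an index on it.
postgres=# CREATE TABLE tpcds_web_returns(wr_item_sk integer, wr_order_number bigint);
postgres=# CREATE INDEX tpcds_web_returns_index1 ON tpcds_web_returns(wr_order_number);

-- Delete the index.
postgres=# DROP INDEX tpcds_web_returns_index1;

-- IF EXISTS suppresses the error if the index has already been deleted.
postgres=# DROP INDEX IF EXISTS tpcds_web_returns_index1;

-- Clean up.
postgres=# DROP TABLE tpcds_web_returns;
```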
+ +## Helpful Links + +[ALTER INDEX](alter-index.md) and [CREATE INDEX](create-index.md) + diff --git a/content/en/docs/Developerguide/drop-owned.md b/content/en/docs/Developerguide/drop-owned.md new file mode 100644 index 000000000..032b8aabd --- /dev/null +++ b/content/en/docs/Developerguide/drop-owned.md @@ -0,0 +1,34 @@ +# DROP OWNED + +## Function + +**DROP OWNED** deletes the database objects owned by a database role. + +## Precautions + +- This interface will revoke the role's permissions on all objects in the current database and shared objects \(databases and tablespaces\). +- **DROP OWNED** is often used to prepare for removing one or more roles. Because **DROP OWNED** affects only the objects in the current database, you need to run this statement in each database that contains the objects owned by the role to be removed. +- Using the **CASCADE** option may cause this statement to recursively remove objects owned by other users. +- The databases and tablespaces owned by the role will not be removed. + +## Syntax + +``` +DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **name** + + Specifies the role name. + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects that depend on the objects to be deleted. + - **RESTRICT**: refuses to delete the objects if other objects depend on them. This is the default action. + + +## Helpful Links + +[REASSIGN OWNED](reassign-owned.md) and [DROP ROLE](drop-role.md) + diff --git a/content/en/docs/Developerguide/drop-procedure.md b/content/en/docs/Developerguide/drop-procedure.md new file mode 100644 index 000000000..584ae9949 --- /dev/null +++ b/content/en/docs/Developerguide/drop-procedure.md @@ -0,0 +1,33 @@ +# DROP PROCEDURE + +## Function + +**DROP PROCEDURE** deletes a stored procedure. + +## Precautions + +None + +## Syntax + +``` +DROP PROCEDURE [ IF EXISTS ] procedure_name ; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified stored procedure does not exist. + +- **procedure\_name** + + Specifies the name of the stored procedure to be deleted. + + Value range: an existing stored procedure name + + +## Helpful Links + +[CREATE PROCEDURE](create-procedure.md) + diff --git a/content/en/docs/Developerguide/drop-role.md b/content/en/docs/Developerguide/drop-role.md new file mode 100644 index 000000000..40d0e356f --- /dev/null +++ b/content/en/docs/Developerguide/drop-role.md @@ -0,0 +1,37 @@ +# DROP ROLE + +## Function + +**DROP ROLE** deletes a role. + +## Precautions + +None + +## Syntax + +``` +DROP ROLE [ IF EXISTS ] role_name [, ...]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified role does not exist. + +- **role\_name** + + Specifies the name of the role to be deleted. + + Value range: an existing role name + + +## Examples + +See [Example:](create-role.md#en-us_topic_0237122112_en-us_topic_0059778189_s0dea2f90b8474387aff0ab3f366a611e) in **CREATE ROLE**. 
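A minimal sketch in addition to the example referenced above, assuming a hypothetical role named **manager** \(the role name and password are placeholders\):

```
-- Setup only: create a role.
postgres=# CREATE ROLE manager IDENTIFIED BY 'Manager@1234';

-- Delete the role.
postgres=# DROP ROLE manager;

-- IF EXISTS reports a notice instead of an error if the role is absent.
postgres=# DROP ROLE IF EXISTS manager;
```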
+ +## Helpful Links + +[CREATE ROLE](create-role.md), [ALTER ROLE](alter-role.md), and [SET ROLE](set-role.md) + diff --git a/content/en/docs/Developerguide/drop-row-level-security-policy.md b/content/en/docs/Developerguide/drop-row-level-security-policy.md new file mode 100644 index 000000000..37e7d81d4 --- /dev/null +++ b/content/en/docs/Developerguide/drop-row-level-security-policy.md @@ -0,0 +1,53 @@ +# DROP ROW LEVEL SECURITY POLICY + +## Function + +**DROP ROW LEVEL SECURITY POLICY** deletes a row-level access control policy from a table. + +## Precautions + +Only the owner of a table or a system administrator has the **DROP ROW LEVEL SECURITY POLICY** permission. + +## Syntax + +``` +DROP [ ROW LEVEL SECURITY ] POLICY [ IF EXISTS ] policy_name ON table_name [ CASCADE | RESTRICT ] +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified row-level access control policy does not exist. + +- **policy\_name** + + Specifies the name of the row-level access control policy to be deleted. + + - table\_name + + Specifies the name of the table containing the row-level access control policy. + + - CASCADE/RESTRICT + + Currently, no objects depend on row-level access control policies. Therefore, **CASCADE** is equivalent to **RESTRICT**, and they are reserved to ensure backward compatibility. + + + +## Examples + +``` +-- Create the data table all_data. +postgres=# CREATE TABLE all_data(id int, role varchar(100), data varchar(100)); + +-- Create a row-level access control policy. +postgres=# CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER); + +-- Delete a row-level access control policy. +postgres=# DROP ROW LEVEL SECURITY POLICY all_data_rls ON all_data; +``` + +## Helpful Links + +[ALTER ROW LEVEL SECURITY POLICY](alter-row-level-security-policy.md) and [CREATE ROW LEVEL SECURITY POLICY](create-row-level-security-policy.md) + diff --git a/content/en/docs/Developerguide/drop-schema.md b/content/en/docs/Developerguide/drop-schema.md new file mode 100644 index 000000000..9de13953c --- /dev/null +++ b/content/en/docs/Developerguide/drop-schema.md @@ -0,0 +1,47 @@ +# DROP SCHEMA + +## Function + +**DROP SCHEMA** deletes a schema from the current database. + +## Precautions + +Only the owner of a schema or a system administrator has the **DROP SCHEMA** permission. + +## Syntax + +``` +DROP SCHEMA [ IF EXISTS ] schema_name [, ...] [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified schema does not exist. + +- **schema\_name** + + Specifies the name of the schema to be deleted. + + Value range: an existing schema name + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes all the objects contained in the schema. + - **RESTRICT**: refuses to delete the schema if the schema contains objects. This is the default action. + + +>![](public_sys-resources/icon-notice.gif) **NOTICE:** +>Schemas beginning with **pg\_temp** or **pg\_toast\_temp** are for internal use. Do not delete them. Otherwise, unexpected consequences may be incurred. + +>![](public_sys-resources/icon-note.gif) **NOTE:** +>The schema currently being used cannot be deleted. To delete it, switch to another schema first. + +## Examples + +See [Examples](create-schema.md#en-us_topic_0237122113_en-us_topic_0059777945_s05e72232af5e4507aad1511c025d7617) in **CREATE SCHEMA**. 
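A minimal sketch in addition to the examples referenced above, assuming a hypothetical schema named **ds_new**:

```
-- Setup only: create a schema containing one table.
postgres=# CREATE SCHEMA ds_new;
postgres=# CREATE TABLE ds_new.t1(id int);

-- RESTRICT (the default) refuses to delete a non-empty schema, so CASCADE is used here.
postgres=# DROP SCHEMA ds_new CASCADE;

-- IF EXISTS reports a notice instead of an error if the schema is absent.
postgres=# DROP SCHEMA IF EXISTS ds_new;
```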
+
+## Helpful Links
+
+[ALTER SCHEMA](alter-schema.md) and [CREATE SCHEMA](create-schema.md)
+
diff --git a/content/en/docs/Developerguide/drop-sequence.md b/content/en/docs/Developerguide/drop-sequence.md
new file mode 100644
index 000000000..ce2ca4084
--- /dev/null
+++ b/content/en/docs/Developerguide/drop-sequence.md
@@ -0,0 +1,49 @@
+# DROP SEQUENCE
+
+## Function
+
+**DROP SEQUENCE** deletes a sequence from the current database.
+
+## Precautions
+
+Only the owner of a sequence or a system administrator has the **DROP SEQUENCE** permission.
+
+## Syntax
+
+```
+DROP SEQUENCE [ IF EXISTS ] {[schema.]sequence_name} [ , ... ] [ CASCADE | RESTRICT ];
+```
+
+## Parameter Description
+
+- **IF EXISTS**
+
+    Reports a notice instead of an error if the specified sequence does not exist.
+
+- **sequence\_name**
+
+    Specifies the name of the sequence to be deleted.
+
+- **CASCADE**
+
+    Automatically deletes the objects that depend on the sequence.
+
+- **RESTRICT**
+
+    Refuses to delete the sequence if any objects depend on it. This is the default action.
+
+
+## Examples
+
+```
+-- Create an ascending sequence named serial, starting from 101.
+postgres=# CREATE SEQUENCE serial START 101;
+
+-- Delete the sequence.
+postgres=# DROP SEQUENCE serial;
+```
+
+## Helpful Links
+
+[ALTER SEQUENCE](alter-sequence.md) and [CREATE SEQUENCE](create-sequence.md)
+
diff --git a/content/en/docs/Developerguide/drop-synonym.md b/content/en/docs/Developerguide/drop-synonym.md
new file mode 100644
index 000000000..f13a7d333
--- /dev/null
+++ b/content/en/docs/Developerguide/drop-synonym.md
@@ -0,0 +1,39 @@
+# DROP SYNONYM
+
+## Function
+
+**DROP SYNONYM** deletes a synonym.
+
+## Precautions
+
+Only the owner of a synonym or a system administrator has the **DROP SYNONYM** permission.
+
+## Syntax
+
+```
+DROP SYNONYM [ IF EXISTS ] synonym_name [ CASCADE | RESTRICT ];
+```
+
+## Parameter Description
+
+- **IF EXISTS**
+
+    Reports a notice instead of an error if the specified synonym does not exist.
+
+- **synonym\_name**
+
+    Specifies the name \(optionally schema-qualified\) of the synonym to be deleted.
+
+- **CASCADE | RESTRICT**
+    - **CASCADE**: automatically deletes the objects \(such as views\) that depend on the synonym.
+    - **RESTRICT**: refuses to delete the synonym if any objects depend on it. This is the default action.
+
+
+## Examples
+
+See [Example](create-synonym.md#en-us_topic_0237122116_section1853433744413) in **CREATE SYNONYM**.
+
+## Helpful Links
+
+[ALTER SYNONYM](alter-synonym.md) and [CREATE SYNONYM](create-synonym.md)
+
diff --git a/content/en/docs/Developerguide/drop-table.md b/content/en/docs/Developerguide/drop-table.md
new file mode 100644
index 000000000..9844d34a8
--- /dev/null
+++ b/content/en/docs/Developerguide/drop-table.md
@@ -0,0 +1,44 @@
+# DROP TABLE
+
+## Function
+
+**DROP TABLE** deletes a table.
+
+## Precautions
+
+**DROP TABLE** forcibly deletes the specified table and the indexes that depend on the table. After the table is deleted, the functions and stored procedures that use this table cannot be executed. Deleting a partitioned table also deletes all partitions in the table.
+
+## Syntax
+
+```
+DROP TABLE [ IF EXISTS ]
+    { [schema.]table_name } [, ...] [ CASCADE | RESTRICT ];
+```
+
+## Parameter Description
+
+- **IF EXISTS**
+
+    Reports a notice instead of an error if the specified table does not exist.
+
+- **schema**
+
+    Specifies the schema name.
+
+- **table\_name**
+
+    Specifies the name of the table to be deleted.
+ +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects \(such as views\) that depend on the table. + - **RESTRICT**: refuses to delete the table if any objects depend on it. This is the default action. + + +## Examples + +See [Example:](create-table.md#en-us_topic_0237122117_en-us_topic_0059778169_s86758dcf05d442d2a9ebd272e76ed1b8) in **CREATE TABLE**. + +## Helpful Links + +[ALTER TABLE](alter-table.md) and [CREATE TABLE](create-table.md) + diff --git a/content/en/docs/Developerguide/drop-tablespace.md b/content/en/docs/Developerguide/drop-tablespace.md new file mode 100644 index 000000000..cd617040e --- /dev/null +++ b/content/en/docs/Developerguide/drop-tablespace.md @@ -0,0 +1,48 @@ +# DROP TABLESPACE + +## Function + +**DROP TABLESPACE** deletes a tablespace. + +## Precautions + +- Only the owner of a tablespace or a system administrator has the **DROP TABLESPACE** permission. +- The tablespace to be deleted should not contain any database objects. Otherwise, an error will be reported. +- **DROP TABLESPACE** cannot be rolled back and therefore cannot be run in transaction blocks. +- During execution of **DROP TABLESPACE**, database queries by other sessions using **\\db** may fail and need to be reattempted. +- If **DROP TABLESPACE** fails to be executed, run **DROP TABLESPACE IF EXISTS**. + +## Syntax + +``` +DROP TABLESPACE [ IF EXISTS ] tablespace_name; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified tablespace does not exist. + +- **tablespace\_name** + + Specifies the name of the tablespace to be deleted. + + Value range: an existing tablespace name + + +## Examples + +See [Examples](create-tablespace.md#en-us_topic_0237122120_en-us_topic_0059777670_s4e5e97caa377440d87fad0d49b56323e) in **CREATE TABLESPACE**. + +## Helpful Links + +[ALTER TABLESPACE](alter-tablespace.md) and [CREATE TABLESPACE](create-tablespace.md) + +## Suggestions + +- drop database + + Do not delete databases during transactions. + + diff --git a/content/en/docs/Developerguide/drop-text-search-configuration.md b/content/en/docs/Developerguide/drop-text-search-configuration.md new file mode 100644 index 000000000..c3ac7819a --- /dev/null +++ b/content/en/docs/Developerguide/drop-text-search-configuration.md @@ -0,0 +1,43 @@ +# DROP TEXT SEARCH CONFIGURATION + +## Function + +**DROP TEXT SEARCH CONFIGURATION** deletes a text search configuration. + +## Precautions + +Only the owner of a text search configuration has the **DROP TEXT SEARCH CONFIGURATION** permission. + +## Syntax + +``` +DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified text search configuration does not exist. + +- **name** + + Specifies the name \(optionally schema-qualified\) of the text search configuration to be deleted. + +- **CASCADE** + + Automatically deletes the objects that depend on the text search configuration. + +- **RESTRICT** + + Refuses to delete the text search configuration if any objects depend on it. This is the default action. + + +## Examples + +See [Examples](create-text-search-configuration.md#en-us_topic_0237122121_en-us_topic_0059777835_sc3a4aef5c0c0420eaf5a2e67097004a2) in **CREATE TEXT SEARCH CONFIGURATION**. 
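+
+A minimal illustrative example is shown below; the configuration name is an example only, and the configuration is created by copying the built-in **english** configuration:
+
+```
+-- Create a text search configuration by copying the built-in english configuration.
+postgres=# CREATE TEXT SEARCH CONFIGURATION ts_conf_example (copy=english);
+
+-- Delete the text search configuration.
+postgres=# DROP TEXT SEARCH CONFIGURATION ts_conf_example;
+```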
+ +## Helpful Links + +[ALTER TEXT SEARCH CONFIGURATION](alter-text-search-configuration.md) and [CREATE TEXT SEARCH CONFIGURATION](create-text-search-configuration.md) + diff --git a/content/en/docs/Developerguide/drop-text-search-dictionary.md b/content/en/docs/Developerguide/drop-text-search-dictionary.md new file mode 100644 index 000000000..9bcaf2c61 --- /dev/null +++ b/content/en/docs/Developerguide/drop-text-search-dictionary.md @@ -0,0 +1,52 @@ +# DROP TEXT SEARCH DICTIONARY + +## Function + +**DROP TEXT SEARCH DICTIONARY** deletes a full-text retrieval dictionary. + +## Precautions + +- Predefined dictionaries do not support the **DROP** operations. +- Only the owner of a dictionary or a system administrator has the **DROP TEXT SEARCH DICTIONARY** permission. +- Execute **DROP...CASCADE** only when necessary because this operation will delete the text search configurations that use this dictionary. + +## Syntax + +``` +DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ] +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified full-text retrieval dictionary does not exist. + +- **name** + + Specifies the name of the dictionary to be deleted. \(If you do not specify a schema name, the dictionary in the current schema will be used.\) + + Value range: an existing dictionary name + +- **CASCADE** + + Automatically deletes the objects that depend on the full-text retrieval dictionary and other objects that depend on these objects. + + If any text search configuration uses the dictionary, the **DROP** statement will fail. You can add **CASCADE** to delete all text search configurations and dictionaries that use this dictionary. + +- **RESTRICT** + + Refuses to delete the full-text retrieval dictionary if any object depends on it. This is the default action. + + +## Examples + +``` +-- Delete the english dictionary. +DROP TEXT SEARCH DICTIONARY english; +``` + +## Helpful Links + +[ALTER TEXT SEARCH DICTIONARY](alter-text-search-dictionary.md) and [CREATE TEXT SEARCH DICTIONARY](create-text-search-dictionary.md) + diff --git a/content/en/docs/Developerguide/drop-trigger.md b/content/en/docs/Developerguide/drop-trigger.md new file mode 100644 index 000000000..cef644eb5 --- /dev/null +++ b/content/en/docs/Developerguide/drop-trigger.md @@ -0,0 +1,47 @@ +# DROP TRIGGER + +## Function + +**DROP TRIGGER** deletes a trigger. + +## Precautions + +Only the owner of a trigger or a system administrator has the **DROP TRIGGER** permission. + +## Syntax + +``` +DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified trigger does not exist. + +- **trigger\_name** + + Specifies the name of the trigger to be deleted. + + Value range: an existing trigger name + +- **table\_name** + + Specifies the name of the table containing the trigger. + + Value range: name of the table containing the trigger + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects that depend on the trigger. + - **RESTRICT**: refuses to delete the trigger if any objects depend on it. This is the default action. + + +## Examples + +For details, see [Examples](create-trigger.md#en-us_topic_0237122123_en-us_topic_0059778166_sfbca773f5bcd4799b3ea668b3eb074fa) in [CREATE TRIGGER](create-trigger.md). 
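+
+As a minimal illustrative sketch \(the trigger and table names are examples only; a trigger with this name must already exist on the table\):
+
+```
+-- Delete the trigger insert_trigger defined on the table test_trigger_src_tbl.
+postgres=# DROP TRIGGER IF EXISTS insert_trigger ON test_trigger_src_tbl;
+```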
+ +## Helpful Links + +[CREATE TRIGGER](create-trigger.md), [ALTER TRIGGER](alter-trigger.md), and [ALTER TABLE](alter-table.md) + diff --git a/content/en/docs/Developerguide/drop-type.md b/content/en/docs/Developerguide/drop-type.md new file mode 100644 index 000000000..21b1a4781 --- /dev/null +++ b/content/en/docs/Developerguide/drop-type.md @@ -0,0 +1,39 @@ +# DROP TYPE + +## Function + +**DROP TYPE** deletes a user-defined data type. Only the owner of a type has the **DROP TYPE** permission. + +## Syntax + +``` +DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified type does not exist. + +- **name** + + Specifies the name \(optionally schema-qualified\) of the type to be deleted. + +- **CASCADE** + + Automatically deletes the objects \(such as fields, functions, and operators\) that depend on the type. + + **RESTRICT** + + Refuses to delete the type if any objects depend on it. This is the default action. + + +## Example + +See [Examples](create-type.md#en-us_topic_0237122124_en-us_topic_0059779377_s66a0b4a6a1df4ba4a116c6c565a0fe9d) in **CREATE TYPE**. + +## Helpful Links + +[CREATE TYPE](create-type.md) and [ALTER TYPE](alter-type.md) + diff --git a/content/en/docs/Developerguide/drop-user.md b/content/en/docs/Developerguide/drop-user.md new file mode 100644 index 000000000..86250bcb9 --- /dev/null +++ b/content/en/docs/Developerguide/drop-user.md @@ -0,0 +1,53 @@ +# DROP USER + +## Function + +**DROP USER** deletes a user and the schema with the same name as the user. + +## Precautions + +- **CASCADE** is used to delete the objects \(excluding databases\) that depend on the user. **CASCADE** cannot delete locked objects unless the objects are unlocked or the processes locking the objects are killed. +- In openGauss, the **enable\_kill\_query** configuration parameter exists in the **postgresql.conf** file. This parameter affects **CASCADE**. + - If **enable\_kill\_query** is **on** and **CASCADE** is used, the statement automatically kills the processes locking dependent objects and then deletes the specified user. + - If **enable\_kill\_query** is **off** and **CASCADE** is used, the statement waits until the processes locking dependent objects stop and then deletes the specified user. + +- If the dependent objects are other databases or reside in other databases, manually delete them before deleting the user from the current database. **DROP USER** cannot delete objects across databases. +- If a data source depends on the user, the user cannot be deleted directly. You need to manually delete the data source first. + +## Syntax + +``` +DROP USER [ IF EXISTS ] user_name [, ...] [ CASCADE | RESTRICT ]; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified user does not exist. + +- **user\_name** + + Specifies the name of the user to be deleted. + + Value range: an existing username + +- **CASCADE | RESTRICT** + - **CASCADE**: automatically deletes the objects that depend on the user. + - **RESTRICT**: refuses to delete the user if any objects depend on it. This is the default action. + + >![](public_sys-resources/icon-note.gif) **NOTE:** + >In openGauss, the **enable\_kill\_query** configuration parameter exists in the **postgresql.conf** file. This parameter affects **CASCADE**. 
+    >- If **enable\_kill\_query** is **on** and **CASCADE** is used, the statement automatically kills the processes locking dependent objects and then deletes the specified user.
+    >- If **enable\_kill\_query** is **off** and **CASCADE** is used, the statement waits until the processes locking dependent objects stop and then deletes the specified user.
+
+
+## Examples
+
+See [Example](create-user.md#en-us_topic_0237122125_en-us_topic_0059778166_sfbca773f5bcd4799b3ea668b3eb074fa) in **CREATE USER**.
+
+## Helpful Links
+
+[ALTER USER](alter-user.md) and [CREATE USER](create-user.md)
+
diff --git a/content/en/docs/Developerguide/drop-view.md b/content/en/docs/Developerguide/drop-view.md
new file mode 100644
index 000000000..0fe1597ae
--- /dev/null
+++ b/content/en/docs/Developerguide/drop-view.md
@@ -0,0 +1,41 @@
+# DROP VIEW
+
+## Function
+
+**DROP VIEW** forcibly deletes a view from the database.
+
+## Precautions
+
+Only the owner of a view or a system administrator has the **DROP VIEW** permission.
+
+## Syntax
+
+```
+DROP VIEW [ IF EXISTS ] view_name [, ...] [ CASCADE | RESTRICT ];
+```
+
+## Parameter Description
+
+- **IF EXISTS**
+
+    Reports a notice instead of an error if the specified view does not exist.
+
+- **view\_name**
+
+    Specifies the name of the view to be deleted.
+
+    Value range: an existing view name
+
+- **CASCADE | RESTRICT**
+    - **CASCADE**: automatically deletes the objects \(such as other views\) that depend on the view.
+    - **RESTRICT**: refuses to delete the view if any objects depend on it. This is the default action.
+
+
+## Examples
+
+See [Examples](create-view.md#en-us_topic_0237122126_en-us_topic_0059779377_s66a0b4a6a1df4ba4a116c6c565a0fe9d) in **CREATE VIEW**.
+
+## Helpful Links
+
+[ALTER VIEW](alter-view.md) and [CREATE VIEW](create-view.md)
+
diff --git a/content/en/docs/Developerguide/durability-20.md b/content/en/docs/Developerguide/durability-20.md
new file mode 100644
index 000000000..9eedf3031
--- /dev/null
+++ b/content/en/docs/Developerguide/durability-20.md
@@ -0,0 +1,19 @@
+# Durability
+
+Write-Ahead Logging \(WAL\) is a standard method for ensuring data durability. WAL's central concept is that changes to data files \(where tables and indexes reside\) must be written only after those changes have been logged, that is, after log records describing the changes have been flushed to permanent storage.
+
+The MOT storage engine is fully integrated with the logging facilities of its openGauss/GaussDB envelope. Besides durability, an additional benefit of this method is that the log can also be used for replication.
+
+Three logging methods are supported: the two standard methods, synchronous and asynchronous, which are also supported by the standard disk engine, and a new group-commit method introduced by MOT with a special NUMA-awareness optimization. Group commit provides the highest performance while maintaining the ACID properties.
+
+- **[Exception Handling](exception-handling.md)**
+
+- **[Logging](logging-21.md)**
+
+- **[Checkpoint](checkpoint-22.md)**
+
+
diff --git a/content/en/docs/Developerguide/durability.md b/content/en/docs/Developerguide/durability.md
new file mode 100644
index 000000000..108ef57e0
--- /dev/null
+++ b/content/en/docs/Developerguide/durability.md
@@ -0,0 +1,8 @@
+# Durability
+
+Durability refers to long-term data protection. This means that stored data does not suffer from any kind of degradation or corruption, so that data is never lost or compromised.
+
+In openGauss, durability is configured in **postgresql.conf**; for example, **synchronous\_commit = on** makes a transaction commit wait until its WAL records have been flushed to permanent storage.
+
diff --git a/content/en/docs/Developerguide/dynamic-statements.md b/content/en/docs/Developerguide/dynamic-statements.md
new file mode 100644
index 000000000..3a6d59509
--- /dev/null
+++ b/content/en/docs/Developerguide/dynamic-statements.md
@@ -0,0 +1,11 @@
+# Dynamic Statements
+
+- **[Executing Dynamic Query Statements](executing-dynamic-query-statements.md)**
+
+- **[Executing Dynamic Non-query Statements](executing-dynamic-non-query-statements.md)**
+
+- **[Dynamically Calling Stored Procedures](dynamically-calling-stored-procedures.md)**
+
+- **[Dynamically Calling Anonymous Blocks](dynamically-calling-anonymous-blocks.md)**
+
+
diff --git a/content/en/docs/Developerguide/dynamically-calling-anonymous-blocks.md b/content/en/docs/Developerguide/dynamically-calling-anonymous-blocks.md
new file mode 100644
index 000000000..5d9bfa1ab
--- /dev/null
+++ b/content/en/docs/Developerguide/dynamically-calling-anonymous-blocks.md
@@ -0,0 +1,23 @@
+# Dynamically Calling Anonymous Blocks
+
+This section describes how to execute anonymous blocks in dynamic statements. Add **IN** and **OUT** after the **EXECUTE IMMEDIATE...USING** statement to pass input and output parameters.
+
+## Syntax
+
+[Figure 1](#en-us_topic_0237122228_en-us_topic_0059778140_fcac14cc166724cca818d8c659b30fbb9) shows the syntax diagram.
+
+**Figure 1** call\_anonymous\_block::=
+![](figures/call_anonymous_block.png "call_anonymous_block")
+
+[Figure 2](#en-us_topic_0237122228_en-us_topic_0059778140_f06fb8cdac8dc4c42bacd550e446ca6bd) shows the syntax diagram for **using\_clause**.
+
+**Figure 2** using\_clause::=
+![](figures/using_clause-2.png "using_clause-2")
+
+The above syntax diagram is explained as follows:
+
+- The executable part of an anonymous block starts with a **BEGIN** statement, is closed by an **END** statement, and ends with a semicolon \(;\).
+- **USING \[IN|OUT|IN OUT\] bind\_argument**: specifies where the variable that passes the stored procedure parameter value is stored. The modifiers in front of **bind\_argument** and of the corresponding parameter are the same.
+- The input and output parameters in the middle of an anonymous block are designated by placeholders. The numbers of the placeholders and parameters are the same. The parameters corresponding to the placeholders appear in the same order as the **USING** parameters.
+- Currently in openGauss, when dynamic statements call anonymous blocks, placeholders cannot be used to pass input and output parameters in an **EXCEPTION** statement.
+
diff --git a/content/en/docs/Developerguide/dynamically-calling-stored-procedures.md b/content/en/docs/Developerguide/dynamically-calling-stored-procedures.md
new file mode 100644
index 000000000..7128a78df
--- /dev/null
+++ b/content/en/docs/Developerguide/dynamically-calling-stored-procedures.md
@@ -0,0 +1,22 @@
+# Dynamically Calling Stored Procedures
+
+This section describes how to dynamically call stored procedures. You must use anonymous statement blocks to wrap stored procedures or statement blocks, and add **IN** and **OUT** after the **EXECUTE IMMEDIATE...USING** statement to pass input and output parameters.
+
+## Syntax
+
+[Figure 1](#en-us_topic_0237122227_en-us_topic_0059778625_f7bf3ce30f4aa42d38394f459c525f33b) shows the syntax diagram.
+
+**Figure 1** call\_procedure::=
+![](figures/call_procedure.png "call_procedure")
+
+[Figure 2](#en-us_topic_0237122227_en-us_topic_0059778625_fd82a97bfa5774a32bd19b36b80dd5248) shows the syntax diagram for **using\_clause**.
+
+**Figure 2** using\_clause::=
+![](figures/using_clause-1.png "using_clause-1")
+
+The above syntax diagram is explained as follows:
+
+- **CALL procedure\_name**: calls the stored procedure.
+- **\[:placeholder1,:placeholder2,...\]**: specifies the placeholder list of the stored procedure parameters. The numbers of the placeholders and parameters are the same.
+- **USING \[IN|OUT|IN OUT\] bind\_argument**: specifies where the variable that passes the stored procedure parameter value is stored. The modifiers in front of **bind\_argument** and of the corresponding parameter are the same.
+
diff --git a/content/en/docs/Developerguide/environment-deployment.md b/content/en/docs/Developerguide/environment-deployment.md
new file mode 100644
index 000000000..a5fb97312
--- /dev/null
+++ b/content/en/docs/Developerguide/environment-deployment.md
@@ -0,0 +1,231 @@
+# Environment Deployment
+
+## Prerequisites
+
+- openGauss is in the normal state, and the user logs in to openGauss with an authenticated identity.
+- The executed SQL syntax is correct and no error is reported.
+- In the historical performance data window, the number of concurrent openGauss tasks is stable, the structure and number of tables remain unchanged, the data volume changes smoothly, and the GUC parameters related to query performance remain unchanged.
+- During prediction, the model has been trained and has converged.
+- The running environment of the AIEngine is stable.
+
+## Request Example
+
+The AIEngine process communicates with the kernel process using HTTPS. An example request is as follows:
+
+```
+curl -X POST -d '{"modelName":"modelname"}' -H 'Content-Type: application/json' 'https://IP-address:port/request-API'
+```
+
+**Table 1** AIEngine external APIs
+
+| Request API | Description |
+| --- | --- |
+| /check | Checks whether a model is properly started. |
+| /configure | Sets model parameters. |
+| /train | Trains a model. |
+| /track_process | Views model training logs. |
+| /setup | Loads historical models. |
+| /predict | Performs prediction using a model. |
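+
+For reference, once the CA certificate has been generated as described in the following section, a request to the **/check** API might look like the example below. The IP address, port number, and certificate path are examples only and depend on your deployment and on the file names produced by the certificate generation script:
+
+```
+# Adjust the --cacert path to wherever the generated CA certificate is stored.
+curl -X POST -d '{"modelName":"modelname"}' -H 'Content-Type: application/json' \
+     --cacert path_to_CA/demoCA/cacert.pem 'https://127.0.0.1:5000/check'
+```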
+ +## Generating Certificates + +Before using the prediction function, you need to use OpenSSL to generate certificates required for authentication between the communication parties, ensuring communication security. + +1. Set up a certificate generation environment. The certificate file is stored in **$GAUSSHOME/CA**. + + -- Copy the certificate generation script and related files. + + ``` + cp path_to_predictor/install/ssl.sh $GAUSSHOME/ + cp path_to_predictor/install/ca_ext.txt $GAUSSHOME/ + ``` + + -- Copy the configuration file **openssl.cnf** to **$GAUSSHOME**. + + ``` + cp $GAUSSHOME/share/om/openssl.cnf $GAUSSHOME/ + ``` + + -- Modify the configuration parameters in **openssl.conf**. + + ``` + dir = $GAUSSHOME/CA/demoCA + default_md = sha256 + ``` + + -- The certificate generation environment is ready. + +2. Generate a certificate and private key. + + ``` + cd $GAUSSHOME + sh ssl.sh + ``` + + -- Set the password as prompted, for example, **Test@123**. + + -- The password must contain at least eight characters of at least three different types. + + ``` + Please enter your password: + ``` + + -- Set the options as prompted. + + ``` + Certificate Details: + Serial Number: 1 (0x1) + Validity + Not Before: May 15 08:32:44 2020 GMT + Not After : May 15 08:32:44 2021 GMT + Subject: + countryName = CN + stateOrProvinceName = SZ + organizationName = HW + organizationalUnitName = GS + commonName = CA + X509v3 extensions: + X509v3 Basic Constraints: + CA:TRUE + Certificate is to be certified until May 15 08:32:44 2021 GMT (365 days) + Sign the certificate? [y/n]:y + 1 out of 1 certificate requests certified, commit? [y/n]y + ``` + + -- Enter the IP address for starting the AIEngine, for example, **127.0.0.1**. + + ``` + Please enter your aiEngine IP: 127.0.0.1 + ``` + + -- Set the options as prompted. + + ``` + Certificate Details: + Serial Number: 2 (0x2) + Validity + Not Before: May 15 08:38:07 2020 GMT + Not After : May 13 08:38:07 2030 GMT + Subject: + countryName = CN + stateOrProvinceName = SZ + organizationName = HW + organizationalUnitName = GS + commonName = 127.0.0.1 + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Certificate is to be certified until May 13 08:38:07 2030 GMT (3650 days) + Sign the certificate? [y/n]:y + 1 out of 1 certificate requests certified, commit? [y/n]y + ``` + + -- Enter the IP address for starting openGauss, for example, **127.0.0.1**. + + ``` + Please enter your gaussdb IP: 127.0.0.1 + ``` + + -- Set the options as prompted. + + ``` + Certificate Details: + Serial Number: 3 (0x3) + Validity + Not Before: May 15 08:41:46 2020 GMT + Not After : May 13 08:41:46 2030 GMT + Subject: + countryName = CN + stateOrProvinceName = SZ + organizationName = HW + organizationalUnitName = GS + commonName = 127.0.0.1 + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Certificate is to be certified until May 13 08:41:46 2030 GMT (3650 days) + Sign the certificate? [y/n]:y + 1 out of 1 certificate requests certified, commit? [y/n]y + ``` + + -- The related certificate and key have been generated. The content in **$GAUSSHOME/CA** is as follows: + + ![](figures/en-us_image_0253082069.png) + + +## Setting Up the Environment + +1. Copy the tool code folder to the target environment. + + -- Assume that the installation directory is **$INSTALL\_FOLDER**. + + -- Assume that the destination directory is **/home/ai\_user**. + + ``` + scp -r $INSTALL_FOLDER/bin/dbmind/predictor ai_user@127.0.0.1:path_to_Predictor + ``` + +2. 
Copy the CA certificate folder to a directory in the AIEngine environment.
+
+    ```
+    scp -r $GAUSSHOME/CA ai_user@127.0.0.1:path_to_CA
+    ```
+
+3. Install the dependencies listed in **predictor/install/requirements\(-gpu\).txt**.
+
+    ```
+    # With GPU:
+    pip install -r requirements-gpu.txt
+    # Without GPU:
+    pip install -r requirements.txt
+    ```
+
+
+## Starting AIEngine
+
+1. Switch to the AIEngine environment \(that is, the target environment **ai\_user** to which the predictor was copied\).
+
+    Set parameters in **predictor/python/settings.py**.
+
+    ```
+    DEFAULT_FLASK_SERVER_HOST = '127.0.0.1'   # running IP address of AIEngine
+    DEFAULT_FLASK_SERVER_PORT = '5000'        # running port number of AIEngine
+    PATH_SSL = "path_to_CA"                   # CA folder path
+    ```
+
+2. Run the startup script of AIEngine.
+
+    ```
+    python path_to_Predictor/python/run.py
+    ```
+
+    The AIEngine then keeps listening on the configured port and waits for time prediction requests from the kernel.
+
+    For details about how to initiate a time prediction command from the kernel, see the _Time Prediction Usage Guide_.
+
+
diff --git a/content/en/docs/Developerguide/error-log.md b/content/en/docs/Developerguide/error-log.md
new file mode 100644
index 000000000..8080403f4
--- /dev/null
+++ b/content/en/docs/Developerguide/error-log.md
@@ -0,0 +1,29 @@
+# ERROR LOG
+
+- **log\_level = INFO**
+
+    Configures the log level of messages issued by the MOT engine and recorded in the error log of the database server. Valid values are PANIC, ERROR, WARN, INFO, TRACE, DEBUG, DIAG1, and DIAG2.
+
+- **Log/COMPONENT/LOGGER=LOG\_LEVEL**
+
+    Configures specific loggers using the syntax described below.
+
+    For example, to configure the TRACE log level for the ThreadIdPool logger in the System component, use the following syntax:
+
+    ```
+    Log/System/ThreadIdPool=TRACE
+    ```
+
+    To configure the log level for all loggers under a component, use the following syntax:
+
+    ```
+    Log/COMPONENT=LOG_LEVEL
+    ```
+
+    For example:
+
+    ```
+    Log/System=DEBUG
+    ```
+
+
diff --git a/content/en/docs/Developerguide/error-reporting-and-logging.md b/content/en/docs/Developerguide/error-reporting-and-logging.md
new file mode 100644
index 000000000..980d2ad9e
--- /dev/null
+++ b/content/en/docs/Developerguide/error-reporting-and-logging.md
@@ -0,0 +1,11 @@
+# Error Reporting and Logging
+
+- **[Logging Destination](logging-destination.md)**
+
+- **[Logging Time](logging-time.md)**
+
+- **[Logging Content](logging-content.md)**
+
+- **[Using CSV Log Output](using-csv-log-output.md)**
+
+
diff --git a/content/en/docs/Developerguide/error-trapping-statements.md b/content/en/docs/Developerguide/error-trapping-statements.md
new file mode 100644
index 000000000..5bb6d5faf
--- /dev/null
+++ b/content/en/docs/Developerguide/error-trapping-statements.md
@@ -0,0 +1,112 @@
+# Error Trapping Statements
+
+By default, any error occurring in a PL/SQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and recover from them by using a **BEGIN** block with an **EXCEPTION** clause. The syntax is an extension of the normal syntax for a **BEGIN** block:
+
+```
+[<