diff --git a/archetypes/default.md b/archetypes/default.md new file mode 100644 index 0000000000000000000000000000000000000000..00e77bd79be44872c0b29256b03799c2fb00c10d --- /dev/null +++ b/archetypes/default.md @@ -0,0 +1,6 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +date: {{ .Date }} +draft: true +--- + diff --git a/config.toml b/config.toml new file mode 100644 index 0000000000000000000000000000000000000000..c06e7f3c5d0f41d1c7ede181b2835051b89bdfb8 --- /dev/null +++ b/config.toml @@ -0,0 +1,66 @@ +baseURL = "https://openeuler.org/" +title = "openEuler documentation" + +disablePathToLower = true + +# Site language. Available translations in the theme's `/i18n` directory. +languageCode = "en-us" +defaultContentLanguage = "zh" +defaultContentLanguageInSubdir = true +# Enable comments by entering your Disqus shortname +disqusShortname = "" +# Enable Google Analytics by entering your tracking code +googleAnalytics = "" + +uglyURLs = true +buildFuture = true + +# Define the number of posts per page +paginate = 10 + + +[languages] + +[languages.en] + languageCode = "en" + contentDir = "content/en" + +[languages.zh] + languageCode = "zh" + contentDir = "content/zh" + +[languages.ru] + languageCode = "ru" + contentDir = "content/ru" + + + +[params] + author = "openEuler" + defaultKeywords = ["devcows", "hugo", "go", "openEuler"] + defaultDescription = "Site template made by devcows Modified by openEuler team" + searchTitle = "openEuler Documentation" + email = "contact@openeuler.org" + + ### DOC Configurations + BookRepo = 'https://gitee.com/opengauss/docs/blob' + BookEditPath= 'blob/master/content' + BookDateFormat = 'Jan 2, 2006' + enableGitInfo = true + BookMenuBundle = '/menu' + bookHidden = true + bookFlatSection = true + BookMenuBundleActiveLinkColor = '#004ed0' + + languageZh = "中文" + languageEn = "English" + languageRu = "Русский" + + ltsText = "LTS" + version_95 = "21.03" + version_96 = "20.03 LTS SP1" + version_97 = "20.03 LTS" + version_98 = "20.09" 
+ + navLink = "For Documentation" + diff --git a/content/en/search/_index.md b/content/en/search/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..13d22ccfd74e6d40c6bbf767de738c1aed3d0b42 --- /dev/null +++ b/content/en/search/_index.md @@ -0,0 +1,3 @@ ++++ +title = "search" ++++ \ No newline at end of file diff --git a/content/ru/search/_index.md b/content/ru/search/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..13d22ccfd74e6d40c6bbf767de738c1aed3d0b42 --- /dev/null +++ b/content/ru/search/_index.md @@ -0,0 +1,3 @@ ++++ +title = "search" ++++ \ No newline at end of file diff --git a/content/zh/search/_index.md b/content/zh/search/_index.md new file mode 100644 index 0000000000000000000000000000000000000000..41ce1c6d4ed27206dd305fdea687f46ae67635b8 --- /dev/null +++ b/content/zh/search/_index.md @@ -0,0 +1,3 @@ ++++ +title = "搜索" ++++ \ No newline at end of file diff --git a/data/docsversion/95.yaml b/data/docsversion/95.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9edd8abc10966a4ba4cb6dc3067f872a3e381fbd --- /dev/null +++ b/data/docsversion/95.yaml @@ -0,0 +1,2 @@ +version : "21.03" +index: "95" \ No newline at end of file diff --git a/data/docsversion/96.yaml b/data/docsversion/96.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91b96a53a40ce467fabfe4a40479179d0ae85fc8 --- /dev/null +++ b/data/docsversion/96.yaml @@ -0,0 +1,2 @@ +version : "20.03 LTS SP1" +index: "96" \ No newline at end of file diff --git a/data/docsversion/97.yaml b/data/docsversion/97.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4e95b2fac096dbee2ec14967eef6e82b19193f1 --- /dev/null +++ b/data/docsversion/97.yaml @@ -0,0 +1,2 @@ +version : "20.03 LTS" +index: "97" \ No newline at end of file diff --git a/data/docsversion/98.yaml b/data/docsversion/98.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..21c7cb7d309911890c065be2f51241d472ab05ff --- /dev/null +++ b/data/docsversion/98.yaml @@ -0,0 +1,2 @@ +version : "20.30 LTS" +index: "97" \ No newline at end of file diff --git a/docs/en/docs/A-Tune/A-Tune.md b/docs/en/docs/A-Tune/A-Tune.md deleted file mode 100644 index cb94a36db10e5d10f1ed758055c3a7ad99011d38..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/A-Tune.md +++ /dev/null @@ -1,5 +0,0 @@ -# A-Tune User Guide - -This document describes how to install and use A-Tune, which is a performance self-optimization software for openEuler. - -This document is intended for developers, open-source enthusiasts, and partners who use the openEuler system and want to know and use A-Tune. You need to have basic knowledge of the Linux OS. \ No newline at end of file diff --git a/docs/en/docs/A-Tune/appendixes.md b/docs/en/docs/A-Tune/appendixes.md deleted file mode 100644 index 2d776555c04a00f5a7c56e5d8b503925019af32a..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/appendixes.md +++ /dev/null @@ -1,25 +0,0 @@ -# Appendixes - -- [Appendixes](#appendixes) - - [Acronyms and Abbreviations](#acronyms-and-abbreviations) - - -## Acronyms and Abbreviations - -**Table 1** Terminology - - - - - - - - - -

Term

-

Description

-

profile

-

Set of optimization items and optimal parameter configuration.

-
- - diff --git a/docs/en/docs/A-Tune/application-scenarios.md b/docs/en/docs/A-Tune/application-scenarios.md deleted file mode 100644 index e1401b69a97062fdb9513018a30df0916b9e464e..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/application-scenarios.md +++ /dev/null @@ -1,1100 +0,0 @@ -# Application Scenarios - -You can use functions provided by A-Tune through the CLI client atune-adm. This chapter describes the functions and usage of the A-Tune client. - -- [Application Scenarios](#application-scenarios) - - [Overview](#overview-0) - - [Querying Workload Types](#querying-workload-types) - - [list](#list) - - [Workload Type Analysis and Auto Optimization](#workload-type-analysis-and-auto-optimization) - - [analysis](#analysis) - - [User-defined Model](#user-defined-model) - - [define](#define) - - [collection](#collection) - - [train](#train) - - [undefine](#undefine) - - [Querying Profiles](#querying-profiles) - - [info](#info) - - [Updating a Profile](#updating-a-profile) - - [update](#update) - - [Activating a Profile](#activating-a-profile) - - [profile](#profile) - - [Rolling Back Profiles](#rolling-back-profiles) - - [rollback](#rollback) - - [Updating Database](#updating-database) - - [upgrade](#upgrade) - - [Querying System Information](#querying-system-information) - - [check](#check) - - [Automatic Parameter Optimization](#automatic-parameter-optimization) - - [Tuning](#tuning) - - - -## Overview - -- You can run the **atune-adm help/--help/-h** command to query commands supported by atune-adm. -- All example commands are used in single-node mode. For distributed mode, specify an IP address and port number. For example: - - ``` - # atune-adm -a 192.168.3.196 -p 60001 list - ``` - -- The **define**, **update**, **undefine**, **collection**, **train**, and **upgrade **commands do not support remote execution. 
-- In the command format, brackets \(\[\]\) indicate that the parameter is optional, and angle brackets \(<\>\) indicate that the parameter is mandatory. The actual parameters prevail. - - -## Querying Workload Types - - - -### list - -#### Function - -Query the supported profiles, and the values of Active. - -#### Format - -**atune-adm list** - -#### Example - -``` -# atune-adm list - -Support profiles: -+------------------------------------------------+-----------+ -| ProfileName | Active | -+================================================+===========+ -| arm-native-android-container-robox | false | -+------------------------------------------------+-----------+ -| basic-test-suite-euleros-baseline-fio | false | -+------------------------------------------------+-----------+ -| basic-test-suite-euleros-baseline-lmbench | false | -+------------------------------------------------+-----------+ -| basic-test-suite-euleros-baseline-netperf | false | -+------------------------------------------------+-----------+ -| basic-test-suite-euleros-baseline-stream | false | -+------------------------------------------------+-----------+ -| basic-test-suite-euleros-baseline-unixbench | false | -+------------------------------------------------+-----------+ -| basic-test-suite-speccpu-speccpu2006 | false | -+------------------------------------------------+-----------+ -| basic-test-suite-specjbb-specjbb2015 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-hdfs-dfsio-hdd | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-hdfs-dfsio-ssd | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-bayesian | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-kmeans | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql1 | false | 
-+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql10 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql2 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql3 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql4 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql5 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql6 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql7 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql8 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-sql9 | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-tersort | false | -+------------------------------------------------+-----------+ -| big-data-hadoop-spark-wordcount | false | -+------------------------------------------------+-----------+ -| cloud-compute-kvm-host | false | -+------------------------------------------------+-----------+ -| database-mariadb-2p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| database-mariadb-4p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| database-mongodb-2p-sysbench | false | -+------------------------------------------------+-----------+ -| database-mysql-2p-sysbench-hdd | false | -+------------------------------------------------+-----------+ -| database-mysql-2p-sysbench-ssd | false | -+------------------------------------------------+-----------+ -| database-postgresql-2p-sysbench-hdd | false | -+------------------------------------------------+-----------+ -| 
database-postgresql-2p-sysbench-ssd | false | -+------------------------------------------------+-----------+ -| default-default | false | -+------------------------------------------------+-----------+ -| docker-mariadb-2p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| docker-mariadb-4p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| hpc-gatk4-human-genome | false | -+------------------------------------------------+-----------+ -| in-memory-database-redis-redis-benchmark | false | -+------------------------------------------------+-----------+ -| middleware-dubbo-dubbo-benchmark | false | -+------------------------------------------------+-----------+ -| storage-ceph-vdbench-hdd | false | -+------------------------------------------------+-----------+ -| storage-ceph-vdbench-ssd | false | -+------------------------------------------------+-----------+ -| virtualization-consumer-cloud-olc | false | -+------------------------------------------------+-----------+ -| virtualization-mariadb-2p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| virtualization-mariadb-4p-tpcc-c3 | false | -+------------------------------------------------+-----------+ -| web-apache-traffic-server-spirent-pingpo | false | -+------------------------------------------------+-----------+ -| web-nginx-http-long-connection | true | -+------------------------------------------------+-----------+ -| web-nginx-https-short-connection | false | -+------------------------------------------------+-----------+ - -``` - ->![](public_sys-resources/icon-note.gif) **NOTE:** ->If the value of Active is **true**, the profile is activated. In the example, the profile of web-nginx-http-long-connection is activated. - -## Workload Type Analysis and Auto Optimization - - -### analysis - -#### Function - -Collect real-time statistics from the system to identify and automatically optimize workload types. 
- -#### Format - -**atune-adm analysis** \[OPTIONS\] - -#### Parameter Description - -- OPTIONS - - - - - - - - - - - - -

Parameter

-

Description

-

--model, -m

-

New model generated after user self-training

-

--characterization, -c

-

Use the default model for application identification and do not perform automatic optimization

-
- - -#### Example - -- Use the default model for application identification. - - ``` - # atune-adm analysis --characterization - ``` - -- Use the default model to identify applications and perform automatic tuning. - - ``` - # atune-adm analysis - ``` - -- Use the user-defined training model for recognition. - - ``` - # atune-adm analysis --model /usr/libexec/atuned/analysis/models/new-model.m - ``` - - -## User-defined Model - -A-Tune allows users to define and learn new models. To define a new model, perform the following steps: - -1. Run the **define** command to define a new profile. -2. Run the **collection** command to collect the system data corresponding to the application. -3. Run the **train** command to train the model. - - -### define - -#### Function - -Add a user-defined application scenarios and the corresponding profile tuning items. - -#### Format - -**atune-adm define** - -#### Example - -Add a profile whose service_type is **test_service**, application_name is **test_app**, scenario_name is **test_scenario**, and tuning item configuration file is **example.conf**. - -``` -# atune-adm define test_service test_app test_scenario ./example.conf -``` - -The **example.conf** file can be written as follows (the following optimization items are optional and are for reference only). You can also run the **atune-adm info** command to view how the existing profile is written. 
- -``` - [main] - # list its parent profile - [kernel_config] - # to change the kernel config - [bios] - # to change the bios config - [bootloader.grub2] - # to change the grub2 config - [sysfs] - # to change the /sys/* config - [systemctl] - # to change the system service status - [sysctl] - # to change the /proc/sys/* config - [script] - # the script extention of cpi - [ulimit] - # to change the resources limit of user - [schedule_policy] - # to change the schedule policy - [check] - # check the environment - [tip] - # the recommended optimization, which should be performed manunaly -``` - -### collection - -#### Function - -Collect the global resource usage and OS status information during service running, and save the collected information to a CSV output file as the input dataset for model training. - ->![](public_sys-resources/icon-note.gif) **NOTE:** ->- This command depends on the sampling tools such as perf, mpstat, vmstat, iostat, and sar. ->- Currently, only the Kunpeng 920 CPU is supported. You can run the **dmidecode -t processor** command to check the CPU model. - -#### Format - -**atune-adm collection** - -#### Parameter Description - -- OPTIONS - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--filename, -f

-

Name of the generated CSV file used for training: name-timestamp.csv

-

--output_path, -o

-

Path for storing the generated CSV file. The absolute path is required.

-

--disk, -b

-

Disk used during service running, for example, /dev/sda.

-

--network, -n

-

Network port used during service running, for example, eth0.

-

--app_type, -t

-

Mark the application type of the service as a label for training.

-

--duration, -d

-

Data collection time during service running, in seconds. The default collection time is 1200 seconds.

-

--interval, -i

-

Interval for collecting data, in seconds. The default interval is 5 seconds.

-
- - -#### Example - -``` -# atune-adm collection --filename name --interval 5 --duration 1200 --output_path /home/data --disk sda --network eth0 --app_type test_type -``` - -### train - -#### Function - -Use the collected data to train the model. Collect data of at least two application types during training. Otherwise, an error is reported. - -#### Format - -**atune-adm train** - -#### Parameter Description - -- OPTIONS - - | Parameter | Description | - | ----------------- | ------------------------------------------------------ | - | --data_path, -d | Path for storing CSV files required for model training | - | --output_file, -o | Model generated through training | - - -#### Example - -Use the CSV file in the **data** directory as the training input. The generated model **new-model.m** is stored in the **model** directory. - -``` -# atune-adm train --data_path /home/data --output_file /usr/libexec/atuned/analysis/models/new-model.m -``` - -### undefine - -#### Function - -Delete a user-defined profile. - -#### Format - -**atune-adm undefine** - -#### Example - -Delete the user-defined profile. - -``` -# atune-adm undefine test_service-test_app-test_scenario -``` - -## Querying Profiles - - -### info - -#### Function - -View the profile content. - -#### Format - -**atune-adm info** - -#### Example - -View the profile content of web-nginx-http-long-connection. 
- -``` -# atune-adm info web-nginx-http-long-connection - -*** web-nginx-http-long-connection: - -# -# nginx http long connection A-Tune configuration -# -[main] -include = default-default - -[kernel_config] -#TODO CONFIG - -[bios] -#TODO CONFIG - -[bootloader.grub2] -iommu.passthrough = 1 - -[sysfs] -#TODO CONFIG - -[systemctl] -sysmonitor = stop -irqbalance = stop - -[sysctl] -fs.file-max = 6553600 -fs.suid_dumpable = 1 -fs.aio-max-nr = 1048576 -kernel.shmmax = 68719476736 -kernel.shmall = 4294967296 -kernel.shmmni = 4096 -kernel.sem = 250 32000 100 128 -net.ipv4.tcp_tw_reuse = 1 -net.ipv4.tcp_syncookies = 1 -net.ipv4.ip_local_port_range = 1024 65500 -net.ipv4.tcp_max_tw_buckets = 5000 -net.core.somaxconn = 65535 -net.core.netdev_max_backlog = 262144 -net.ipv4.tcp_max_orphans = 262144 -net.ipv4.tcp_max_syn_backlog = 262144 -net.ipv4.tcp_timestamps = 0 -net.ipv4.tcp_synack_retries = 1 -net.ipv4.tcp_syn_retries = 1 -net.ipv4.tcp_fin_timeout = 1 -net.ipv4.tcp_keepalive_time = 60 -net.ipv4.tcp_mem = 362619 483495 725238 -net.ipv4.tcp_rmem = 4096 87380 6291456 -net.ipv4.tcp_wmem = 4096 16384 4194304 -net.core.wmem_default = 8388608 -net.core.rmem_default = 8388608 -net.core.rmem_max = 16777216 -net.core.wmem_max = 16777216 - -[script] -prefetch = off -ethtool = -X {network} hfunc toeplitz - -[ulimit] -{user}.hard.nofile = 102400 -{user}.soft.nofile = 102400 - -[schedule_policy] -#TODO CONFIG - -[check] -#TODO CONFIG - -[tip] -SELinux provides extra control and security features to linux kernel. Disabling SELinux will improve the performance but may cause security risks. = kernel -disable the nginx log = application -``` - -## Updating a Profile - -You can update the existing profile as required. - - -### update - -#### Function - -Update the original tuning items in the existing profile to the content in the **new.conf** file. 
- -#### Format - -**atune-adm update** - -#### Example - -Change the tuning item of the profile named **test_service-test_app-test_scenario** to **new.conf**. - -``` -# atune-adm update test_service-test_app-test_scenario ./new.conf -``` - -## Activating a Profile - -### profile - -#### Function - -Manually activate the profile to make it in the active state. - -#### Format - -**atune-adm profile** - -#### Parameter Description - -For details about the profile name, see the query result of the list command. - -#### Example - -Activate the profile corresponding to the web-nginx-http-long-connection. - -``` -# atune-adm profile web-nginx-http-long-connection -``` - -## Rolling Back Profiles - -### rollback - -#### Functions - -Roll back the current configuration to the initial configuration of the system. - -#### Format - -**atune-adm rollback** - -#### Example - -``` -# atune-adm rollback -``` - -## Updating Database - -### upgrade - -#### Function - -Update the system database. - -#### Format - -**atune-adm upgrade** - -#### Parameter Description - -- DB\_FILE - - New database file path. - - -#### Example - -The database is updated to **new\_sqlite.db**. - -``` -# atune-adm upgrade ./new_sqlite.db -``` - -## Querying System Information - - -### check - -#### Function - -Check the CPU, BIOS, OS, and NIC information. 
- -#### Format - -**atune-adm check** - -#### Example - -``` -# atune-adm check - cpu information: - cpu:0 version: Kunpeng 920-6426 speed: 2600000000 HZ cores: 64 - cpu:1 version: Kunpeng 920-6426 speed: 2600000000 HZ cores: 64 - system information: - DMIBIOSVersion: 0.59 - OSRelease: 4.19.36-vhulk1906.3.0.h356.eulerosv2r8.aarch64 - network information: - name: eth0 product: HNS GE/10GE/25GE RDMA Network Controller - name: eth1 product: HNS GE/10GE/25GE Network Controller - name: eth2 product: HNS GE/10GE/25GE RDMA Network Controller - name: eth3 product: HNS GE/10GE/25GE Network Controller - name: eth4 product: HNS GE/10GE/25GE RDMA Network Controller - name: eth5 product: HNS GE/10GE/25GE Network Controller - name: eth6 product: HNS GE/10GE/25GE RDMA Network Controller - name: eth7 product: HNS GE/10GE/25GE Network Controller - name: docker0 product: -``` - -## Automatic Parameter Optimization - -A-Tune provides the automatic search capability with the optimal configuration, saving the trouble of manually configuring parameters and performance evaluation. This greatly improves the search efficiency of optimal configurations. - - -### Tuning - -#### Function - -Use the specified project file to search the dynamic space for parameters and find the optimal solution under the current environment configuration. - -#### Format - -**atune-adm tuning** \[OPTIONS\] - ->![](public_sys-resources/icon-note.gif) **NOTE:** ->Before running the command, ensure that the following conditions are met: ->1. The YAML configuration file on the server has been edited and stored in the **/etc/atuned/tuning/** directory of the atuned service. ->2. The YAML configuration file of the client has been edited and stored on the atuned client. - -#### Parameter Description - -- OPTIONS - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--restore, -r

-

Restores the initial configuration before tuning.

-

--project, -p

-

Specifies the project name in the YAML file to be restored.

-

--restart, -c

-

Perform tuning based on historical tuning results.

-

--detail, -d

-

Print detailed information about the tuning process.

-
- - - >![](public_sys-resources/icon-note.gif) **NOTE:** - >If this parameter is used, the -p parameter must be followed by a specific project name and the YAML file of the project must be specified. - - -- **PROJECT\_YAML**: YAML configuration file of the client. - -#### Configuration Description - -**Table 1** YAML file on the server - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Name

-

Description

-

Type

-

Value Range

-

project

-

Project name.

-

Character string

-

-

-

startworkload

-

Script for starting the service to be optimized.

-

Character string

-

-

-

stopworkload

-

Script for stopping the service to be optimized.

-

Character string

-

-

-

maxiterations

-

Maximum number of optimization iterations, which is used to limit the number of iterations on the client. Generally, the more optimization iterations, the better the optimization effect, but the longer the time required. Set this parameter based on the site requirements.

-

Integer

-

>10

-

object

-

Parameters to be optimized and related information.

-

For details about the object configuration items, see Table 2.

-

-

-

-

-
- -**Table 2** Description of object configuration items - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Name

-

Description

-

Type

-

Value Range

-

name

-

Parameter to be optimized.

-

Character string

-

-

-

desc

-

Description of parameters to be optimized.

-

Character string

-

-

-

get

-

Script for querying parameter values.

-

-

-

-

-

set

-

Script for setting parameter values.

-

-

-

-

-

needrestart

-

Specifies whether to restart the service for the parameter to take effect.

-

Enumeration

-

true or false

-

type

-

Parameter type. Currently, the discrete and continuous types are supported.

-

Enumeration

-

discrete or continuous

-

dtype

-

This parameter is available only when type is set to discrete. Currently, int, float and string are supported.

-

Enumeration

-

int, float, string

-

scope

-

Parameter setting range. This parameter is valid only when type is set to discrete and dtype is set to int or float, or type is set to continuous.

-

Integer/Float

-

The value is user-defined and must be within the valid range of this parameter.

-

step

-

Parameter value step, which is used when dtype is set to int or float.

-

Integer/Float

-

This value is user-defined.

-

items

-

Enumerated value of which the parameter value is not within the scope. This is used when dtype is set to int or float.

-

Integer/Float

-

The value is user-defined and must be within the valid range of this parameter.

-

options

-

Enumerated value range of the parameter value, which is used when dtype is set to string.

-

Character string

-

The value is user-defined and must be within the valid range of this parameter.

-
- -**Table 3** Description of configuration items of a YAML file on the client - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Name

-

Description

-

Type

-

Value Range

-

project

-

Project name, which must be the same as that in the configuration file on the server.

-

Character string

-

-

-

engine

-

Tuning algorithm.

-

Character string

-

"random", "forest", "gbrt", "bayes", "extraTrees"

-

iterations

-

Number of optimization iterations.

-

Integer

-

≥ 10

-

random_starts

-

Number of random iterations.

-

Integer

-

< iterations

-

feature_filter_engine

-

Parameter search algorithm, which is used to select important parameters. This parameter is optional.

-

Character string

-

"lhs"

-

feature_filter_cycle

-

Parameter search cycles, which is used to select important parameters. This parameter is used together with feature_filter_engine.

-

Integer

-

-

-

feature_filter_iters

-

Number of iterations for each cycle of parameter search, which is used to select important parameters. This parameter is used together with feature_filter_engine.

-

Integer

-

-

-

split_count

-

Number of evenly selected parameters in the value range of tuning parameters, which is used to select important parameters. This parameter is used together with feature_filter_engine.

-

Integer

-

-

-

benchmark

-

Performance test script.

-

-

-

-

-

evaluations

-

Performance test evaluation index.

-

For details about the evaluations configuration items, see Table 4.

-

-

-

-

-
- - -**Table 4** Description of evaluations configuration item - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Name

-

Description

-

Type

-

Value Range

-

name

-

Evaluation index name.

-

Character string

-

-

-

get

-

Script for obtaining performance evaluation results.

-

-

-

-

-

type

-

Specifies a positive or negative type of the evaluation result. The value positive indicates that the performance value is minimized, and the value negative indicates that the performance value is maximized.

-

Enumeration

-

positive or negative

-

weight

-

Weight of the index. The value ranges from 0 to 100.

-

Integer

-

0-100

-

threshold

-

Minimum performance requirement of the index.

-

Integer

-

User-defined

-
- -#### Example - -The following is an example of the YAML file configuration on a server: - -``` -project: "compress" -maxiterations: 500 -startworkload: "" -stopworkload: "" -object : - - - name : "compressLevel" - info : - desc : "The compresslevel parameter is an integer from 1 to 9 controlling the level of compression" - get : "cat /root/A-Tune/examples/tuning/compress/compress.py | grep 'compressLevel=' | awk -F '=' '{print $2}'" - set : "sed -i 's/compressLevel=\\s*[0-9]*/compressLevel=$value/g' /root/A-Tune/examples/tuning/compress/compress.py" - needrestart : "false" - type : "continuous" - scope : - - 1 - - 9 - dtype : "int" - - - name : "compressMethod" - info : - desc : "The compressMethod parameter is a string controlling the compression method" - get : "cat /root/A-Tune/examples/tuning/compress/compress.py | grep 'compressMethod=' | awk -F '=' '{print $2}' | sed 's/\"//g'" - set : "sed -i 's/compressMethod=\\s*[0-9,a-z,\"]*/compressMethod=\"$value\"/g' /root/A-Tune/examples/tuning/compress/compress.py" - needrestart : "false" - type : "discrete" - options : - - "bz2" - - "zlib" - - "gzip" - dtype : "string" -``` - -   - -The following is an example of the YAML file configuration on a client: - -``` -project: "compress" -engine : "gbrt" -iterations : 20 -random_starts : 10 - -benchmark : "python3 /root/A-Tune/examples/tuning/compress/compress.py" -evaluations : - - - name: "time" - info: - get: "echo '$out' | grep 'time' | awk '{print $3}'" - type: "positive" - weight: 20 - - - name: "compress_ratio" - info: - get: "echo '$out' | grep 'compress_ratio' | awk '{print $3}'" - type: "negative" - weight: 80 -``` - -   - -#### Example - -- Perform tuning. - - ``` - # atune-adm tuning --project compress --detail compress_client.yaml - ``` - -- Restore the initial configuration before tuning. The compress is the project name in the YAML file. 
- - ``` - # atune-adm tuning --restore --project compress - ``` - - diff --git a/docs/en/docs/A-Tune/faqs.md b/docs/en/docs/A-Tune/faqs.md deleted file mode 100644 index 0a350b3ebf59fe290d0be52a0c9bd838bc54df4a..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/faqs.md +++ /dev/null @@ -1,57 +0,0 @@ -# FAQs - -## Q1: An error occurs when the **train** command is used to train a model, and the message "training data failed" is displayed. - -Cause: Only one type of data is collected by using the **collection **command. - -Solution: Collect data of at least two data types for training. - - - -## Q2: The atune-adm cannot connect to the atuned service. - -Possible cause: - -1. Check whether the atuned service is started and check the atuned listening address. - - ``` - # systemctl status atuned - # netstat -nap | atuned - ``` - -2. The firewall blocks the atuned listening port. -3. The HTTP proxy is configured in the system. As a result, the connection fails. - -Solution: - -1. If the atuned service is not started, run the following command to start the service: - - ``` - # systemctl start atuned - ``` - -2. Run the following command on the atuned and atune-adm servers to allow the listening port to receive network packets. In the command, **60001** is the listening port number of the atuned server. - - ``` - # iptables -I INPUT -p tcp --dport 60001 -j ACCEPT - # iptables -I INPUT -p tcp --sport 60001 -j ACCEPT - ``` - - -1. Run the following command to delete the HTTP proxy or disable the HTTP proxy for the listening IP address without affecting services: - - ``` - # no_proxy=$no_proxy, Listening IP address - ``` - - -## Q3: The atuned service cannot be started, and the message "Job for atuned.service failed because a timeout was exceeded." is displayed. - -Cause: The hosts file does not contain the localhost information. - -Solution: Add localhost to the line starting with **127.0.0.1** in the **/etc/hosts** file. 
- -``` -127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 -``` - diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0213178479.png b/docs/en/docs/A-Tune/figures/en-us_image_0213178479.png deleted file mode 100644 index 62ef0decdf6f1e591059904001d712a54f727e68..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0213178479.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0213178480.png b/docs/en/docs/A-Tune/figures/en-us_image_0213178480.png deleted file mode 100644 index ad5ed3f7beeb01e6a48707c4806606b41d687e22..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0213178480.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0214540398.png b/docs/en/docs/A-Tune/figures/en-us_image_0214540398.png deleted file mode 100644 index cea2292307b57854aa629ec102a5bc1b16d244a0..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0214540398.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0227497000.png b/docs/en/docs/A-Tune/figures/en-us_image_0227497000.png deleted file mode 100644 index 3df66e5f25177cba7fe65cfb859fab860bfb7b46..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0227497000.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0227497343.png b/docs/en/docs/A-Tune/figures/en-us_image_0227497343.png deleted file mode 100644 index a8654b170295b4b0be3c37187e4b227ca635fbc0..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0227497343.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0231122163.png b/docs/en/docs/A-Tune/figures/en-us_image_0231122163.png deleted file mode 100644 index c61c39c5f5119d84c6799b1e17285a7fe313639f..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/A-Tune/figures/en-us_image_0231122163.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/figures/en-us_image_0245342444.png b/docs/en/docs/A-Tune/figures/en-us_image_0245342444.png deleted file mode 100644 index 10f0fceb42c00c80ef49decdc0c480eb04c2ca6d..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/figures/en-us_image_0245342444.png and /dev/null differ diff --git a/docs/en/docs/A-Tune/getting-to-know-a-tune.md b/docs/en/docs/A-Tune/getting-to-know-a-tune.md deleted file mode 100644 index 2092e0152e2c31ea4bf1aa95277302bcc981b6a9..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/getting-to-know-a-tune.md +++ /dev/null @@ -1,195 +0,0 @@ -# Getting to Know A-Tune - -- [Getting to Know A-Tune](#getting-to-know-a-tune) - - [Introduction](#introduction) - - [Architecture](#architecture) - - [Supported Features and Service Models](#supported-features-and-service-models) - - - -## Introduction - -An operating system \(OS\) is basic software that connects applications and hardware. It is critical for users to adjust OS and application configurations and make full use of software and hardware capabilities to achieve optimal service performance. However, numerous workload types and varied applications run on the OS, and the requirements on resources are different. Currently, the application environment composed of hardware and software involves more than 7000 configuration objects. As the service complexity and optimization objects increase, the time cost for optimization increases exponentially. As a result, optimization efficiency decreases sharply. Optimization becomes complex and brings great challenges to users. - -Second, as infrastructure software, the OS provides a large number of software and hardware management capabilities. The capability required varies in different scenarios. 
Therefore, capabilities need to be enabled or disabled depending on scenarios, and a combination of capabilities will maximize the optimal performance of applications. - -In addition, the actual business embraces hundreds and thousands of scenarios, and each scenario involves a wide variety of hardware configurations for computing, network, and storage. The lab cannot list all applications, business scenarios, and hardware combinations. - -To address the preceding challenges, openEuler launches A-Tune. - -A-Tune is an AI-based engine that optimizes system performance. It uses AI technologies to precisely profile business scenarios, discover and infer business characteristics, so as to make intelligent decisions, match with the optimal system parameter configuration combination, and give recommendations, ensuring the optimal business running status. - -![](figures/en-us_image_0227497000.png) - -## Architecture - -The following figure shows the A-Tune core technical architecture, which consists of intelligent decision-making, system profile, and interaction system. - -- Intelligent decision-making layer: consists of the awareness and decision-making subsystems, which implements intelligent awareness of applications and system optimization decision-making, respectively. -- System profile layer: consists of the feature engineering and two-layer classification model. The feature engineering is used to automatically select service features, and the two-layer classification model is used to learn and classify service models. -- Interaction system layer: monitors and configures various system resources and executes optimization policies. - -![](figures/en-us_image_0227497343.png) - -## Supported Features and Service Models - -### Supported Features - -[Table 1](#table1919220557576) describes the main features supported by A-Tune, feature maturity, and usage suggestions. - -**Table 1** Feature maturity - - - - - - - - - - - - - - - - - - - -

Feature

-

Maturity

-

Usage Suggestion

-

Auto optimization of 15 applications in 11 workload types

-

Tested

-

Pilot

-

User-defined profile and service models

-

Tested

-

Pilot

-

Automatic parameter optimization

-

Tested

-

Pilot

-
- - -### Supported Service Models - -Based on the workload characteristics of applications, A-Tune classifies services into 11 types. For details about the bottleneck of each type and the applications supported by A-Tune, see [Table 2](#table2819164611311). - -**Table 2** Supported workload types and applications - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Service category

-

Type

-

Bottleneck

-

Supported Application

-

default

-

Default type

-

Low resource usage in terms of cpu, memory, network, and I/O

-

N/A

-

webserver

-

Web application

-

Bottlenecks of cpu and network

-

Nginx, Apache Traffic Server

-

database

-

Database

-
Bottlenecks of cpu, memory, and I/O

-

Mongodb, Mysql, Postgresql, Mariadb

-

big_data

-

Big data

-

Bottlenecks of cpu and memory

-

Hadoop-hdfs, Hadoop-spark

-

middleware

-

Middleware framework

-

Bottlenecks of cpu and network

-

Dubbo

-

in-memory_database

-

Memory database

-

Bottlenecks of memory and I/O

-

Redis

-

basic-test-suite

-

Basic test suite

-

Bottlenecks of cpu and memory

-

SPECCPU2006, SPECjbb2015

-

hpc

-

Human genome

-

Bottlenecks of cpu, memory, and I/O

-

Gatk4

-

storage

-

Storage

-

Bottlenecks of network, and I/O

-

Ceph

-

virtualization

-

Virtualization

-

Bottlenecks of cpu, memory, and I/O

-

Consumer-cloud, Mariadb

-

docker

-

Docker

-

Bottlenecks of cpu, memory, and I/O

-

Mariadb

-
- - - diff --git a/docs/en/docs/A-Tune/installation-and-deployment.md b/docs/en/docs/A-Tune/installation-and-deployment.md deleted file mode 100644 index c1a9b811734c75446b7d6550786462628ea78c51..0000000000000000000000000000000000000000 --- a/docs/en/docs/A-Tune/installation-and-deployment.md +++ /dev/null @@ -1,361 +0,0 @@ -# Installation and Deployment - -This chapter describes how to install and deploy A-Tune. - -- [Installation and Deployment](#installation-and-deployment) - - [Software and Hardware Requirements](#software-and-hardware-requirements) - - [Environment Preparation](#environment-preparation) - - [A-Tune Installation](#a-tune-installation) - - [Installation Modes](#installation-modes) - - [Installation Procedure](#installation-procedure) - - [A-Tune Deployment](#a-tune-deployment) - - [Starting A-Tune](#starting-a-tune) - - [Starting A-Tune engine](#starting-a-tune-engine) - - - - -## Software and Hardware Requirements - -### Hardware Requirement - -- Huawei Kunpeng 920 processor - -### Software Requirement - -- OS: openEuler 21.03 - -## Environment Preparation - -For details about installing an openEuler OS, see _openEuler 21.03 Installation Guide_. - -## A-Tune Installation - -This chapter describes the installation modes and methods of the A-Tune. - -### Installation Modes - -A-Tune can be installed in single-node or distributed mode. - -- Single-node mode - - The client and server are installed on the same system. - -- Distributed mode - - The client and server are installed on different systems. - - -The installation modes are as follows: - -![](./figures/en-us_image_0231122163.png) - -   - -### Installation Procedure - -To install the A-Tune, perform the following steps: - -1. Mount an openEuler ISO file. - - ``` - # mount openEuler-21.03-aarch64-dvd.iso /mnt - ``` - -2. Configure the local yum source. 
- - ``` - # vim /etc/yum.repos.d/local.repo - ``` - - The configured contents are as follows: - - ``` - [local] - name=local - baseurl=file:///mnt - gpgcheck=1 - enabled=1 - ``` - -3. Import the GPG public key of the RPM digital signature to the system. - - ``` - # rpm --import /mnt/RPM-GPG-KEY-openEuler - ``` - - -4. Install an A-Tune server. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >In this step, both the server and client software packages are installed. For the single-node deployment, skip **Step 5**. - - ``` - # yum install atune -y - # yum install atune-engine -y - ``` - -5. For a distributed mode, install an A-Tune client on associated server. - - ``` - # yum install atune-client -y - ``` - -6. Check whether the installation is successful. - - ``` - # rpm -qa | grep atune - atune-client-xxx - atune-db-xxx - atune-xxx - atune-engine-xxx - ``` - - If the preceding information is displayed, the installation is successful. - - -## A-Tune Deployment - -This chapter describes how to deploy A-Tune. - - - -### Overview - -The configuration items in the A-Tune configuration file **/etc/atuned/atuned.cnf** are described as follows: - -- A-Tune service startup configuration - - You can modify the parameter value as required. - - - **protocol**: Protocol used by the gRPC service. The value can be **unix** or **tcp**. **unix** indicates the local socket communication mode, and **tcp** indicates the socket listening port mode. The default value is **unix**. - - **address**: Listening IP address of the gRPC service. The default value is **unix socket**. If the gRPC service is deployed in distributed mode, change the value to the listening IP address. - - **port**: Listening port of the gRPC server. The value ranges from 0 to 65535. If **protocol** is set to **unix**, you do not need to set this parameter. - - **connect**: IP address list of the nodes where the A-Tune is located when the A-Tune is deployed in a cluster. 
IP addresses are separated by commas (,). - - **rest_host**: Listening address of the REST service. The default value is localhost. - - **rest_port**: Listening port of the REST service. The value ranges from 0 to 65535. The default value is 8383. - - **engine_host**: IP address for connecting to the A-Tune engine service of the system. - - **engine_port**: Port for connecting to the A-Tune engine service of the system. - - **sample_num**: Number of samples collected when the system executes the analysis process. The default value is 20. - - **interval**: Interval for collecting samples when the system executes the analysis process. The default value is 5s. - - **grpc_tls**: Indicates whether to enable SSL/TLS certificate verification for the gRPC service. By default, this function is disabled. After grpc_tls is enabled, you need to set the following environment variables before running the **atune-adm** command to communicate with the server: - - export ATUNE_TLS=yes - - export ATUNED_CACERT= - - export ATUNED_CLIENTCERT= - - export ATUNED_CLIENTKEY= - - export ATUNED_SERVERCN=server - - **tlsservercafile**: Path of the gPRC server's CA certificate. - - **tlsservercertfile**: Path of the gPRC server certificate. - - **tlsserverkeyfile**: Path of the gPRC server key. - - **rest_tls**: Indicates whether to enable SSL/TLS certificate verification for the REST service. This function is enabled by default. - - **tlsrestcacertfile**: Path of the server's CA certificate of the REST service. - - **tlsrestservercertfile**: Path of the server certificate of the REST service. - - **tlsrestserverkeyfile**: Indicates the key path of the REST service. - - **engine_tls**: Indicates whether to enable SSL/TLS certificate verification for the A-Tune engine service. This function is enabled by default.. - - **tlsenginecacertfile**: Path of the client CA certificate of the A-Tune engine service. - - **tlsengineclientcertfile**: Client certificate path of the A-Tune engine service. 
- - **tlsengineclientkeyfile**: Client key path of the A-Tune engine service. - -- System information - - System is the parameter information required for system optimization. You must modify the parameter information according to the actual situation. - - - **disk**: Disk information to be collected during the analysis process or specified disk during disk optimization. - - **network**: NIC information to be collected during the analysis process or specified NIC during NIC optimization. - - **user**: User name used for ulimit optimization. Currently, only the user **root** is supported. - -- Log information - - Change the log level as required. The default log level is info. Log information is recorded in the **/var/log/messages** file. - -- Monitor information - - Hardware information that is collected by default when the system is started. - -- Tuning information - - Tuning is the parameter information required for offline tuning. - - - **noise**: Evaluation value of Gaussian noise. - - **sel_feature**: Indicates whether to enable the function of generating the importance ranking of offline tuning parameters. By default, this function is disabled. 
- - -#### Example - -``` -#################################### server ############################### - # atuned config - [server] - # the protocol grpc server running on - # ranges: unix or tcp - protocol = unix - - # the address that the grpc server to bind to - # default is unix socket /var/run/atuned/atuned.sock - # ranges: /var/run/atuned/atuned.sock or ip address - address = /var/run/atuned/atuned.sock - - # the atune nodes in cluster mode, separated by commas - # it is valid when protocol is tcp - # connect = ip01,ip02,ip03 - - # the atuned grpc listening port - # the port can be set between 0 to 65535 which not be used - # port = 60001 - - # the rest service listening port, default is 8383 - # the port can be set between 0 to 65535 which not be used - rest_host = localhost - rest_port = 8383 - - # the tuning optimizer host and port, start by engine.service - # if engine_host is same as rest_host, two ports cannot be same - # the port can be set between 0 to 65535 which not be used - engine_host = localhost - engine_port = 3838 - - # when run analysis command, the numbers of collected data. 
- # default is 20 - sample_num = 20 - - # interval for collecting data, default is 5s - interval = 5 - - # enable gRPC authentication SSL/TLS - # default is false - # grpc_tls = false - # tlsservercafile = /etc/atuned/grpc_certs/ca.crt - # tlsservercertfile = /etc/atuned/grpc_certs/server.crt - # tlsserverkeyfile = /etc/atuned/grpc_certs/server.key - - # enable rest server authentication SSL/TLS - # default is true - rest_tls = true - tlsrestcacertfile = /etc/atuned/rest_certs/ca.crt - tlsrestservercertfile = /etc/atuned/rest_certs/server.crt - tlsrestserverkeyfile = /etc/atuned/rest_certs/server.key - - # enable engine server authentication SSL/TLS - # default is true - engine_tls = true - tlsenginecacertfile = /etc/atuned/engine_certs/ca.crt - tlsengineclientcertfile = /etc/atuned/engine_certs/client.crt - tlsengineclientkeyfile = /etc/atuned/engine_certs/client.key - - - #################################### log ############################### - [log] - # either "debug", "info", "warn", "error", "critical", default is "info" - level = info - - #################################### monitor ############################### - [monitor] - # with the module and format of the MPI, the format is {module}_{purpose} - # the module is Either "mem", "net", "cpu", "storage" - # the purpose is "topo" - module = mem_topo, cpu_topo - - #################################### system ############################### - # you can add arbitrary key-value here, just like key = value - # you can use the key in the profile - [system] - # the disk to be analysis - disk = sda - - # the network to be analysis - network = enp189s0f0 - - user = root - - #################################### tuning ############################### - # tuning configs - [tuning] - noise = 0.000000001 - sel_feature = false -``` - -The configuration items in the configuration file **/etc/atuned/engine.cnf** of the A-Tune engine are described as follows: - -- Startup configuration of the A-Tune engine service - - You can 
modify the parameter value as required. - - - **engine_host**: Listening address of the A-Tune engine service. The default value is localhost. - - **engine_port**: Listening port of the A-Tune engine service. The value ranges from 0 to 65535. The default value is 3838. - - **engine_tls**: Indicates whether to enable SSL/TLS certificate verification for the A-Tune engine service. This function is enabled by default. - - **tlsenginecacertfile**: Path of the server CA certificate of the A-Tune engine service. - - **tlsengineservercertfile**: Path of the server certificate of the A-Tune engine service. - - **tlsengineserverkeyfile**: Server key path of the A-Tune engine service. - -- Log information - - Change the log level as required. The default log level is info. Log information is recorded in the **/var/log/messages** file. - - -#### Example - -``` -#################################### engine ############################### - [server] - # the tuning optimizer host and port, start by engine.service - # if engine_host is same as rest_host, two ports cannot be same - # the port can be set between 0 to 65535 which not be used - engine_host = localhost - engine_port = 3838 - - # enable engine server authentication SSL/TLS - # default is true - engine_tls = true - tlsenginecacertfile = /etc/atuned/engine_certs/ca.crt - tlsengineservercertfile = /etc/atuned/engine_certs/server.crt - tlsengineserverkeyfile = /etc/atuned/engine_certs/server.key - - #################################### log ############################### - [log] - # either "debug", "info", "warn", "error", "critical", default is "info" - level = info -``` - -## Starting A-Tune - -After the A-Tune is installed, you need to start the A-Tune service. - -- Start the atuned service. - - ``` - # systemctl start atuned - ``` - - -- Query the atuned service status. 
- - ``` - # systemctl status atuned - ``` - - If the following command output is displayed, the service is started successfully: - - ![](./figures/en-us_image_0214540398.png) - -## Starting A-Tune engine - -To use AI functions, you need to start the A-Tune engine service. - -- Start the atune-engine service. - - ``` - # systemctl start atune-engine - ``` - - -- Query the atune-engine service status. - - ``` - # systemctl status atune-engine - ``` - - If the following command output is displayed, the service is started successfully: - - ![](./figures/en-us_image_0245342444.png) - - diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-caution.gif b/docs/en/docs/A-Tune/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-danger.gif b/docs/en/docs/A-Tune/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-note.gif b/docs/en/docs/A-Tune/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-notice.gif b/docs/en/docs/A-Tune/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-tip.gif 
b/docs/en/docs/A-Tune/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/A-Tune/public_sys-resources/icon-warning.gif b/docs/en/docs/A-Tune/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/A-Tune/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Administration/administration.md b/docs/en/docs/Administration/administration.md deleted file mode 100644 index 0c32e256b1d6e2e2f7b268b47f7466b660f7744b..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/administration.md +++ /dev/null @@ -1,4 +0,0 @@ -# Administrator Guide - -This document provides common administrator operations of the openEuler system to help administrators better use the system. -This document is intended for all administrators who use openEuler. 
diff --git a/docs/en/docs/Administration/basic-configuration.md b/docs/en/docs/Administration/basic-configuration.md deleted file mode 100644 index 617937b9bf1e85b2a9d6ae00f227c3ef2f39678c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/basic-configuration.md +++ /dev/null @@ -1,468 +0,0 @@ -# Basic Configuration - - - -- [Basic Configuration](#basic-configuration) - - [Setting the System Locale](#setting-the-system-locale) - - [Displaying the Current Locale Status](#displaying-the-current-locale-status) - - [Listing Available Locales](#listing-available-locales) - - [Setting the Locale](#setting-the-locale) - - [Setting the Keyboard Layout](#setting-the-keyboard-layout) - - [Displaying the Current Settings](#displaying-the-current-settings) - - [Listing Available Keyboard Layouts](#listing-available-keyboard-layouts) - - [Setting the Keyboard Layout](#setting-the-keyboard-layout-1) - - [Setting the Date and Time](#setting-the-date-and-time) - - [Using the timedatectl Command](#using-the-timedatectl-command) - - [Using the date Command](#using-the-date-command) - - [Using the hwclock Command](#using-the-hwclock-command) - - [Setting kdump](#setting-kdump) - - [Setting the Memory Reserved for kdump](#setting-the-memory-reserved-for-kdump) - - [Recommended Reserved Memory](#recommended-reserved-memory) - - [Disabling Network Drivers](#disabling-network-drivers) - - -## Setting the System Locale - -System locale settings are stored in the /etc/locale.conf file and can be modified by the localectl command. These settings are read at system boot by the systemd daemon. 
- -### Displaying the Current Locale Status - -To display the current locale status, run the following command: - -``` -$ localectl status -``` - -Example command output: - -``` -$ localectl status - System Locale: LANG=zh_CN.UTF-8 - VC Keymap: cn - X11 Layout: cn -``` - -### Listing Available Locales - -To display available locales, run the following command: - -``` -$ localectl list-locales -``` - -You can check that by listing all Chinese locales with the following command: - -``` -$ localectl list-locales | grep zh -zh_CN.UTF-8 -``` - -### Setting the Locale - -To set the language environment, run the following command as the user **root**. In the command, _locale_ indicates the language type to be set. Run the **localectl list-locales** command to obtain the value range. Change the value based on the site requirements. - -``` -# localectl set-locale LANG=locale -``` - -For example, if you want to use Simplified Chinese as the locale, run the following command as the user **root**: - -``` -# localectl set-locale LANG=zh_CN.UTF-8 -``` - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** - -> After the modification, log in again or run the command `source /etc/locale.conf` as the user **root** to update the configuration file for the modification to take effect: - -## Setting the Keyboard Layout - -Keyboard layout settings are stored in the /etc/locale.conf file and can be modified by the localectl command. These settings are read at early boot by the systemd daemon. 
- -### Displaying the Current Settings - -To display the current keyboard layout settings, run the following command: - -``` -$ localectl status -``` - -Example command output: - -``` -$ localectl status - System Locale: LANG=zh_CN.UTF-8 - VC Keymap: cn - X11 Layout: cn -``` - -### Listing Available Keyboard Layouts - -To list all available keyboard layouts that can be configured on openEuler, run the following command: - -``` -$ localectl list-keymaps -``` - -For example, the command output of the Chinese keyboard layout is as follows: - -``` -$ localectl list-keymaps | grep cn -cn -``` - -### Setting the Keyboard Layout - -To set the keyboard layout, run the following command as the user **root**. In the command, _map_ indicates the keyboard layout to be set. Run the **localectl list-keymaps** command to obtain the value range. Change it based on the site requirements. - -``` -$ localectl set-keymap map -``` - -The keyboard layout will be equally applied to graphical user interfaces. - -Then you can verify if your setting was successful by checking the status: - -``` -$ localectl status - System Locale: LANG=zh_CN.UTF-8 - VC Keymap: cn - X11 Layout: us -``` - -## Setting the Date and Time - -This topic describes how to set the system date, time, and time zone by using timedatectl, date, and hwclock commands. - -### Using the timedatectl Command - -#### Displaying the Current Date and Time - -To display the current date and time, run the following command: - -``` -$ timedatectl -``` - -Example command output: - -``` -$ timedatectl - Local time: Mon 2019-09-30 04:05:00 EDT - Universal time: Mon 2019-09-30 08:05:00 UTC - RTC time: Mon 2019-09-30 08:05:00 - Time zone: America/New_York (EDT, -0400) -System clock synchronized: no - NTP service: inactive - RTC in local TZ: no -``` - -#### Synchronizing the System Clock with a Remote Server - -Your system clock can be automatically synchronized with a remote server using the Network Time Protocol (NTP). 
Run the following command as the user **root** to enable or disable NTP. The value of _boolean_ is **yes** or **no**, indicating that the NTP is enabled or disabled for automatic system clock synchronization. Change the value based on the site requirements. - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -If the remote NTP server is enabled to automatically synchronize the system clock, you cannot manually change the date and time. If you need to manually change the date or time, ensure that automatic NTP system clock synchronization is disabled. You can run the **timedatectl set-ntp no** command to disable the NTP service. - -``` -# timedatectl set-ntp boolean -``` - -For example, to enable automatic remote time synchronization, run the following command: - -``` -# timedatectl set-ntp yes -``` - -#### Changing the Current Date - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -Before changing the date, ensure that automatic NTP system clock synchronization has been disabled. - -Run the following command as the user **root** to change the current date. In the command, _YYYY_ indicates the year, _MM_ indicates the month, and _DD_ indicates the day. Change them based on the site requirements. - -``` -# timedatectl set-time YYYY-MM-DD -``` - -For example, to change the current date to August 14, 2019, run the following command as the user **root**: - -``` -# timedatectl set-time '2019-08-14' -``` - -#### Changing the Current Time - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -Before changing the time, ensure that automatic NTP system clock synchronization has been disabled. - -To change the current time, run the following command as the user **root**. In the command, _HH_ indicates the hour, _MM_ indicates the minute, and _SS_ indicates the second. Change them based on the site requirements. 
- -``` -# timedatectl set-time HH:MM:SS -``` - -For example, to change the current time to 15:57:24, run the following command: - -``` -# timedatectl set-time 15:57:24 -``` - -#### Changing the Time Zone - -To list all available time zones, run the following command: - -``` -$ timedatectl list-timezones -``` - -To change the current time zone, run the following command as the user **root**. In the command, _time\_zone_ indicates the time zone to be set. Change it based on the site requirements. - -``` -# timedatectl set-timezone time_zone -``` - -Imagine you want to identify which time zone is closest to your present location while you are in Asia. You can check that by listing all available time zones in Asia with the following command: - -``` -# timedatectl list-timezones | grep Asia -Asia/Aden -Asia/Almaty -Asia/Amman -Asia/Anadyr -Asia/Aqtau -Asia/Aqtobe -Asia/Ashgabat -Asia/Baghdad -Asia/Bahrain -...... - -Asia/Seoul -Asia/Shanghai -Asia/Singapore -Asia/Srednekolymsk -Asia/Taipei -Asia/Tashkent -Asia/Tbilisi -Asia/Tehran -Asia/Thimphu -Asia/Tokyo -``` - -To change the time zone to Asia/Shanghai, run the following command: - -``` -# timedatectl set-timezone Asia/Shanghai -``` - -### Using the date Command - -#### Displaying the Current Date and Time - -To display the current date and time, run the following command: - -``` -$ date -``` - -By default, the **date** command displays the local time. To display the time in Coordinated Universal Time (UTC), run the command with the --utc or -u command line option: - -``` -$ date --utc -``` - -You can also customize the format of the displayed information by providing the + "format" option on the command line: - -``` -$ date +"format" -``` - -**Table 1** Formatting options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Format Option

-

Description

-

%H

-

The hour in the HH format (for example, 17)

-

%M

-

The minute in the MM format (for example, 37)

-

%S

-

The second in the SS format (for example, 25)

-

%d

-

The day of the month in the DD format (for example, 15)

-

%m

-

The month in the MM format (for example, 07)

-

%Y

-

The year in the YYYY format (for example, 2019)

-

%Z

-

The time zone abbreviation (for example, CEST)

-

%F

-

The full date in the YYYY-MM-DD format (for example, 2019-7-15). This option is equal to %Y-%m-%d.

-

%T

-

The full time in the HH:MM:SS format (for example, 18:30:25). This option is equal to %H:%M:%S.

-
-Example commands and outputs: - -- To display the current date and time: - - ``` - $ date - Sat Aug 17 17:26:34 CST 2019 - ``` - -- To display the current date and time in UTC: - - ``` - $ date --utc - Sat Aug 17 09:26:18 UTC 2019 - ``` - -- To customize the output of the date command: - - ``` - $ date +"%Y-%m-%d %H:%M" - 2019-08-17 17:24 - ``` - -#### Changing the Current Time - -To change the current time, run the date command with the --set or -s option as the root user: Run the following command as the user **root**. In the command, _HH_ indicates the hour, _MM_ indicates the minute, and _SS_ indicates the second. Change them based on the site requirements. - -``` -# date --set HH:MM:SS -``` - -By default, the date command sets the local time. To set the system clock in UTC instead, run the command with the --utc or -u command line option: - -``` -# date --set HH:MM:SS --utc -``` - -For example, to change the current time to 23:26:00, run the following command as the user **root**: - -``` -# date --set 23:26:00 -``` - -#### Changing the Current Date - -To change the current date, run the command with the --set or -s command line option. Run the following command as the user **root**. In the command, _YYYY_ indicates the year, _MM_ indicates the month, and _DD_ indicates the day. Change them based on the site requirements. - -``` -# date --set YYYY-MM-DD -``` - -For example, to change the current date to November 2, 2019, run the following command as the user **root**: - -``` -# date --set 2019-11-02 -``` - -### Using the hwclock Command - -You can run the hwclock command to set the real time clock (RTC). - -#### Real-Time Clock and System Clock - -Linux divides clocks into the following types: - -- System clock: clock of the current Linux kernel. -- Hardware clock RTC: hardware clock of the mainboard powered by the battery. This clock can be set in the **Standard BIOS Feature** option of the BIOS. 
- -When Linux starts, it reads the RTC and sets the system clock time based on the RTC time. - -#### Displaying the Current Date and Time - -To display the current RTC date and time, run the following command as the user **root**: - -``` -# hwclock -``` - -Example command output: - -``` -# hwclock -2019-08-26 10:18:42.528948+08:00 -``` - -#### Setting the Date and Time - -Run the following command as the user **root** to change the date and time of the current hardware. In the command, _dd_ indicates the day, _mm_ indicates the month, _yyyy_ indicates the year, _HH_ indicates the hour, and _MM_ indicates the minute. Change them based on the site requirements. - -``` -# hwclock --set --date "dd mm yyyy HH:MM" -``` - -For example, to change the current time to 21:17 on October 21, 2019, run the following command: - -``` -# hwclock --set --date "21 Oct 2019 21:17" --utc -``` - -## Setting kdump - -This section describes how to set the memory reserved for kdump and modify parameters in the kdump configuration file. - -### Setting the Memory Reserved for kdump - -#### Parameter Formats of the Memory Reserved for kdump - -The memory reserved for kdump must be added to the bootargs in the **/boot/efi/EFI/openEuler/grub.cfg** configuration file. The memory reserved for kdump has been added to the released openEuler version by default and can be adjusted as required. After adding or modifying the bootargs, restart the system for the setting to take effect. The parameter formats of the memory reserved for kdump are as follows: - -| Bootarg| Description| Default Value| Remarks| -|----------|----------|----------|----------| -| crashkernel=x| If the physical memory size is less than 4 GB, x of the memory is reserved for kdump.| The default value is 512 MB for x86.| This configuration method is used only when the available memory size is less than 4 GB. 
In this case, ensure that the available contiguous memory is sufficient for reservation.| -| crashkernel=x@y| x of the memory is reserved at the start address of y for kdump.| Unused| Ensure that x of the memory at the start address of y is not reserved for other modules.| -| crashkernel=x,high| If the physical memory size is less than 4 GB, 256 MB memory is reserved. If the physical memory size is greater than 4 GB, x of the memory is reserved for kdump. | The default value is 1024M,high for ARM64.| Ensure that the available physical contiguous memory size is greater than or equal to 256 MB when the memory size is less than 4 GB, and is greater than or equal to x when the memory size is greater than 4 GB. The actual reserved memory size is 256 MB + x. | -| crashkernel=x,low crashkernel=y,high| x of the memory is reserved for kdump when the physical memory size is less than 4 GB, and y of the memory is reserved for kdump when the physical memory size is greater than 4 GB. | Unused| Ensure that the available physical contiguous memory size is greater than or equal to x when the physical memory size is less than 4 GB, and is greater than or equal to y when the physical memory size is greater than 4 GB.| - -### Recommended Reserved Memory - -| Recommended Solution| Reserved Parameter| Description| -|----------|----------|----------| -| General solution| crashkernel=2048M,high| If the memory size is less than 4 GB, 256 MB is reserved for kdump. If the memory size is greater than 4 GB, 2048 MB is reserved for kdump. 256 + 2048 MB in total.| -| Economical solution| crashkernel=1024M,high| If the memory size is less than 4 GB, 256 MB is reserved for kdump. If the memory size is greater than 4 GB, 1024 MB is reserved for kdump. 256 + 1024 MB in total. It is recommended that kdump files not be dumped using the network in scenarios where the system memory size is less than 512 GB. In VM scenarios, you can reduce the reserved memory. 
You are advised to set crashkernel to 512M or crashkernel to 256M,high.| - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -> -> If kdump files are not dumped using the network, you need to set the kdump file system not to pack network drivers. Loading the network driver requires a large amount of memory. As a result, the memory reserved for kdump may be insufficient and kdump may fail. Therefore, you are advised to disable network drivers. - -### Disabling Network Drivers - -In the kdump configuration file **/etc/kdump.conf**, the dracut parameters can be used to set the tailored driver module. You can configure the network driver to the tailored driver list to prevent the kdump file system from loading the driver. After the configuration file is modified, restart the kdump service for the modification to take effect. Set the dracut parameters as follows: - -`dracut_args --omit-drivers "mdio-gpi usb_8dev et1011c rt2x00usb bcm-phy-lib mac80211_hwsim rtl8723be rndis_host hns3_cae amd vrf rtl8192cu mt76x02-lib int51x1 ppp_deflate team_mode_loadbalance smsc911x aweth bonding mwifiex_usb hnae dnet rt2x00pci vaser_pci hdlc_ppp marvell rtl8xxxu mlxsw_i2c ath9k_htc rtl8150 smc91x cortina at803x rockchip cxgb4 spi_ks8995 mt76x2u smsc9420 mdio-cavium bnxt_en ch9200 dummy macsec ice mt7601u rtl8188ee ixgbevf net1080 liquidio_vf be2net mlxsw_switchx2 gl620a xilinx_gmii2rgmii ppp_generic rtl8192de sja1000_platform ath10k_core cc770_platform realte igb c_can_platform c_can ethoc dm9601 smsc95xx lg-vl600 ifb enic ath9 mdio-octeon ppp_mppe ath10k_pci cc770 team_mode_activebackup marvell10g hinic rt2x00lib mlx4_en iavf broadcom igc c_can_pci alx rtl8192se rtl8723ae microchip lan78xx atl1c rtl8192c-common almia ax88179_178a qed netxen_nic brcmsmac rt2800usb e1000 qla3xxx mdio-bitbang qsemi mdio-mscc-miim plx_pci ipvlan r8152 cx82310_eth slhc mt76x02-usb ems_pci xen-netfront usbnet pppoe mlxsw_minimal mlxsw_spectrum cdc_ncm rt2800lib rtl_usb hnae3 ath9k_common ath9k_hw catc 
mt76 hns_enet_drv ppp_async huawei_cdc_ncm i40e rtl8192ce dl2 qmi_wwan mii peak_usb plusb can-dev slcan amd-xgbe team_mode_roundrobin ste10Xp thunder_xcv pptp thunder_bgx ixgbe davicom icplus tap tun smsc75xx smsc dlci hns_dsaf mlxsw_core rt2800mmi softing uPD60620 vaser_usb dp83867 brcmfmac mwifiex_pcie mlx4_core micrel team macvlan bnx2 virtio_net rtl_pci zaurus hns_mdi libcxgb hv_netvsc nicvf mt76x0u teranetics mlxfw cdc_eem qcom-emac pppox mt76-usb sierra_net i40evf bcm87xx mwifiex pegasus rt2x00mmi sja1000 ena hclgevf cnic cxgb4vf ppp_synctty iwlmvm team_mode_broadcast vxlan vsockmon hdlc_cisc rtl8723-common bsd_comp fakelb dp83822 dp83tc811 cicada fm10 8139t sfc hs geneve hclge xgene-enet-v2 cdc_mbim hdlc asix netdevsim rt2800pci team_mode_random lxt ems_usb mlxsw_pci sr9700 mdio-thunder mlxsw_switchib macvtap atlantic cdc_ether mcs7830 nicpf mdi peak_pci atl1e cdc_subset ipvtap btcoexist mt76x0-common veth slip iwldvm bcm7xxx vitesse netconsole epic100 myri10ge r8169 qede microchip_t1 liquidi bnx2x brcmutil mwifiex_sdi mlx5_core rtlwifi vmxnet3 nlmon hns3 hdlc_raw esd_usb2 atl2 mt76x2-common iwlwifi mdio-bcm-unimac national ath rtwpci rtw88 nfp rtl8821ae fjes thunderbolt-net 8139cp atl1 mscc vcan dp83848 dp83640 hdlc_fr e1000e ipheth net_failover aquantia rtl8192ee igbvf rocker intel-xway tg3" --omit "ramdisk network ifcfg qemu-net" --install "chmod" --nofscks` \ No newline at end of file diff --git a/docs/en/docs/Administration/configuring-services.md b/docs/en/docs/Administration/configuring-services.md deleted file mode 100644 index 35ffda1bb6f4f8a2eae7527a03101872449a82fc..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/configuring-services.md +++ /dev/null @@ -1,4 +0,0 @@ -# Configuring Services - - - diff --git a/docs/en/docs/Administration/configuring-the-ftp-server.md b/docs/en/docs/Administration/configuring-the-ftp-server.md deleted file mode 100644 index 
e841f0f63bc612e9e07973d8ae8e67c8f11cd14d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/configuring-the-ftp-server.md +++ /dev/null @@ -1,556 +0,0 @@ -# Configuring the FTP Server - - -- [Configuring the FTP Server](#configuring-the-ftp-server) - - [General Introduction](#general-introduction) - - [FTP Overview](#ftp-overview) - - [Port Used by the FTP Server](#port-used-by-the-ftp-server) - - [Introduction to vsftpd](#introduction-to-vsftpd) - - [Using vsftpd](#using-vsftpd) - - [Installing vsftpd](#installing-vsftpd) - - [Service Management](#service-management) - - [Configuring vsftpd](#configuring-vsftpd) - - [vsftpd Configuration Files](#vsftpd-configuration-files) - - [Default Configuration Description](#default-configuration-description) - - [Setting the Local Time](#setting-the-local-time) - - [Configuring Welcome Information](#configuring-welcome-information) - - [Configuring the Login Permission of a System Account](#configuring-the-login-permission-of-a-system-account) - - [Verifying Whether the FTP Service Is Successfully Set Up](#verifying-whether-the-ftp-service-is-successfully-set-up) - - [Configuring a Firewall](#configuring-a-firewall) - - [File Transmission](#file-transmission) - - [Overview](#overview) - - [Connecting to the Server](#connecting-to-the-server) - - [Downloading a File](#downloading-a-file) - - [Uploading a file](#uploading-a-file) - - [Deleting a File](#deleting-a-file) - - [Disconnecting from the Server](#disconnecting-from-the-server) - - - -## General Introduction - -### FTP Overview -File Transfer Protocol \(FTP\) is one of the earliest transmission protocols on the Internet. It is used to transfer files between the server and client. FTP allows users to access files on a remote system using a set of standard commands without logging in to the remote system. 
In addition, the FTP server provides the following functions: - -- Subscriber classification - - By default, the FTP server classifies users into real users, guest users, and anonymous users based on the login status. The three types of users have different access permissions. Real users have complete access permissions, while anonymous users have only the permission to downloading resources. - -- Command records and log file records - - FTP can use the syslogd to record data, including historical commands and user transmission data \(such as the transmission time and file size\). Users can obtain log information from the /var/log/ directory. - -- Restricting the access scope of users - - FTP can limit the work scope of a user to the home directory of the user. After a user logs in to the system through FTP, the root directory displayed by the system is the home directory of the user. This environment is called change root \(chroot for short\). In this way, users can access only the main directory, but not important directories such as /etc, /home, and /usr/local. This protects the system and keeps the system secure. - - -### Port Used by the FTP Server -The FTP service requires multiple network ports. The server uses the following ports: - -- Command channel. The default port number is 21. -- Data channel. The default port number is 20. - -Port 21 is used to receive connection requests from the FTP client, and port 20 is used by the FTP server to proactively connect to the FTP client. - -### Introduction to vsftpd -FTP has a long history and uses the unencrypted transmission mode, and is therefore considered insecure. This section describes the Very Secure FTP Daemon \(vsftpd\), to use FTP in a more secure way. - -The vsftpd is introduced to build a security-centric FTP server. The vsftpd is designed with the following features: - -- The startup user of the vsftpd service is a common user who has low system permission. 
In addition, the vsftpd service uses chroot to change the root directory, preventing the risk of misusing system tools. -- Any vsftpd command that requires high execution permission is controlled by a special upper-layer program. The upper-layer program has low permission and does not affect the system. -- vsftpd integrates most of the extra commands \(such as dir, ls, and cd\) used by FTP. Generally, the system does not need to provide extra commands, which are secure for the system. - -## Using vsftpd - -### Installing vsftpd -To use the vsftpd service, you need to install the vsftpd software. If the yum source has been configured, run the following command as the root user to install the vsftpd service: - -``` -# dnf install vsftpd -``` - -### Service Management -To start, stop, or restart the vsftpd service, run the corresponding command as the root user. - -- Starting vsftpd services - - ``` - # systemctl start vsftpd - ``` - - You can run the netstat command to check whether communication port 21 is enabled. If the following information is displayed, the vsftpd service has been enabled. - - ``` - # netstat -tulnp | grep 21 - tcp6 0 0 :::21 :::* LISTEN 19716/vsftpd - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >If the **netstat** command does not exist, run the **dnf install net-tools** command to install the **net-tools** software and then run the **netstat** command. - -- Stopping the vsftpd services - - ``` - # systemctl stop vsftpd - ``` - - -- Restarting the vsftpd service - - ``` - # systemctl restart vsftpd - ``` - - -## Configuring vsftpd - - - -### vsftpd Configuration Files - -You can modify the vsftpd configuration file to control user permissions. [Table 1](#table1541615718372) describes the vsftpd configuration files. You can modify the configuration files as required. You can run the man command to view more parameter meanings. - -**Table 1** vsftpd configuration files - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Configuration File

-

Description

-

/etc/vsftpd/vsftpd.conf

-

Main configuration file of the vsftpd process. The configuration format is Parameter=Parameter value. The parameter and parameter value cannot be empty.

-

You can run the following command to view details about the vsftpd.conf file:

-

man 5 vsftpd.conf

-

/etc/pam.d/vsftpd

-

Pluggable authentication modules (PAMs) are used for identity authentication and restrict some user operations.

-

/etc/vsftpd/ftpusers

-

List of users who are not allowed to use the vsftpd. By default, the system account is also in this file. Therefore, the system account cannot use vsftpd by default.

-

/etc/vsftpd/user_list

-

List of users who are allowed or not allowed to log in to the vsftpd server. Whether the file takes effect depends on the following parameters in the main configuration file vsftpd.conf:

-

userlist_enable: indicates whether to enable the userlist mechanism. The value YES indicates that the userlist mechanism is enabled. In this case, the userlist_deny configuration is valid. The value NO indicates that the userlist mechanism is disabled.

-

userlist_deny: indicates whether to forbid users in the user list to log in. YES indicates that users in the user list are forbidden to log in. NO indicates that users in the user list are allowed to log in.

-

For example, if userlist_enable is set to YES and userlist_deny is set to YES, all users in the user list cannot log in.

-

/etc/vsftpd/chroot_list

-

Whether to restrict the user list in the home directory. By default, this file does not exist. You need to create it manually. It is the value of chroot_list_file in the vsftpd.conf file.

-

The function of this parameter is determined by the following parameters in the vsftpd.conf file:

-
  • chroot_local_user: indicates whether to restrict all users to the home directory. The value YES indicates that all users are restricted to the home directory, and the value NO indicates that all users are not restricted to the home directory.
  • chroot_list_enable: indicates whether to enable the list of restricted users. The value YES indicates that the list is enabled, and the value NO indicates that the list is disabled.
-

For example, if chroot_local_user is set to YES, chroot_list_enable is set to YES, and chroot_list_file is set to /etc/vsftpd/chroot_list, all users are restricted to their home directories, and users in chroot_list are not restricted.

-

/usr/sbin/vsftpd

-

Unique execution file of vsftpd.

-

/var/ftp/

-

Default root directory for anonymous user logins. This directory corresponds to the home directory of the ftp user.

-
- - -### Default Configuration Description - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The configuration content in this document is for reference only. You can modify the content based on the site requirements \(for example, security hardening requirements\). - -In the openEuler system, vsftpd does not open to anonymous users by default. Run the vim command to view the main configuration file. The content is as follows: - -``` -$ vim /etc/vsftpd/vsftpd.conf -anonymous_enable=NO -local_enable=YES -write_enable=YES -local_umask=022 -dirmessage_enable=YES -xferlog_enable=YES -connect_from_port_20=YES -xferlog_std_format=YES -listen=NO -listen_ipv6=YES -pam_service_name=vsftpd -userlist_enable=YES -``` - -[Table 2](#table18185162512499) describes the parameters. - -**Table 2** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

anonymous_enable

-

Indicates whether to allow anonymous users to log in. YES indicates that anonymous users are allowed to log in; NO indicates that anonymous users are not allowed to log in.

-

local_enable

-

Whether to allow local users to log in. YES indicates that local users are allowed to log in. NO indicates that local users are not allowed to log in.

-

write_enable

-

Whether to allow the login user to have the write permission. YES indicates that the upload and write function is enabled, and NO indicates that the function is disabled.

-

local_umask

-

Indicates the umask value used when a local user creates a file.

-

dirmessage_enable

-

Indicates whether to display the contents that users need to pay attention to when a user accesses a directory. The options are YES (yes) and NO (no).

-

xferlog_enable

-

Indicates whether to record file upload and download operations. The options are YES (record operations) and NO (not record operations).

-

connect_from_port_20

-

Indicates whether port 20 is used for data transmission in port mode. YES indicates that port 20 is used, and NO indicates that port 20 is not used.

-

xferlog_std_format

-

Indicates whether the transfer log file is written in the standard xferlog format. The options are YES (yes) and NO (no).

-

listen

-

Indicates whether the vsftpd service is started in standalone mode. The options are YES (yes) and NO (no).

-

pam_service_name

-

Indicates the PAM service name used for authentication, for example, vsftpd.

-

userlist_enable

-

Indicates whether to support account login control in the /etc/vsftpd/user_list file. The options are YES (yes) and NO (no).

-

tcp_wrappers

-

Indicates whether to support the firewall mechanism of the TCP Wrappers. The options are YES (yes) and NO (no).

-

listen_ipv6

-

Indicates whether to listen to IPv6 FTP requests. The options are YES (yes) and NO (no). listen and listen_ipv6 cannot be enabled at the same time.

-
- -### Setting the Local Time - -#### Overview -In the openEuler system, vsftpd uses the Greenwich Mean Time \(GMT\) time by default, which may be different from the local time. For example, the GMT time is 8 hours later than the Beijing time. You need to change the GMT time to the local time. Otherwise, the server time and client time are inconsistent, which may cause errors during file upload and download. - -#### Setting Method -To set the vsftpd time to the local time, perform the following steps as the **root** user: - -1. Open the vsftpd.conf file and change the value of use\_localtime to **YES**. Run the following command: - - ``` - # vim /etc/vsftpd/vsftpd.conf - ``` - - Modify the file contents as follows: - - ``` - use_localtime=YES - ``` - -2. Restart the vsftpd service. - - ``` - # systemctl restart vsftpd - ``` - -3. Set the vsftpd service to start automatically upon power-on. - - ``` - # systemctl enable vsftpd - ``` - - -### Configuring Welcome Information - -To use the vsftpd service normally, the welcome information file must exist. To configure the welcome.txt file of the vsftp service, perform the following steps as the **root** user: - -1. Open the vsftpd.conf configuration file, add the welcome information to the file, save the file, and exit. - - ``` - # vim /etc/vsftpd/vsftpd.conf - ``` - - The following configuration lines need to be added: - - ``` - banner_file=/etc/vsftpd/welcome.txt - ``` - -2. Create welcome information. Specifically, open the welcome.txt file, write the welcome information, save the file, and exit. - - ``` - # vim /etc/vsftpd/welcome.txt - ``` - - The following is an example: - - ``` - Welcome to this FTP server! - ``` - - -### Configuring the Login Permission of a System Account - -Generally, users need to restrict the login permission of some accounts. You can set the restriction as required. - -Two files are used to restrict the login of system accounts. 
The default files are as follows: - -- /etc/vsftpd/ftpusers: This file is managed by the PAM module and is determined by the settings of the /etc/pam.d/vsftpd file. -- /etc/vsftpd/user\_list: This file is set by userlist\_file in vsftpd.conf and is provided by vsftpd. - -Both files must exist and have the same content. You can write the accounts whose UIDs are smaller than 500 to the two files by referring to the /etc/passwd. Each line indicates an account. - -To restrict the login of system accounts, add the accounts to /etc/vsftpd/ftpusers and /etc/vsftpd/user\_list as the **root** user. - -Open the user\_list file to view the account information in the current file. The command and output are as follows: - -``` -$ vim /etc/vsftpd/user_list -root -bin -daemon -adm -lp -sync -shutdown -halt -mail -news -uucp -operator -games -nobody -``` - -## Verifying Whether the FTP Service Is Successfully Set Up - -You can use the FTP client provided by openEuler for verification. The command and output are as follows. Enter the user name \(an existing user in the system\) and password as prompted. If the message "Login successful" is displayed, the FTP server is successfully set up. - -``` -$ ftp localhost -Trying 127.0.0.1... -Connected to localhost (127.0.0.1). -220-Welcome to this FTP server! -220 -Name (localhost:root): USERNAME -331 Please specify the password. -Password: -230 Login successful. -Remote system type is UNIX. -Using binary mode to transfer files. -ftp> bye -221 Goodbye. -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the **ftp** command does not exist, run the **dnf install ftp** command as the **root** user to install the **ftp** software and then run the **ftp** command. - -## Configuring a Firewall - -To open the FTP service to the Internet, you need to configure the firewall and SElinux as the **root** user. 
- -``` -# firewall-cmd --add-service=ftp --permanent -success -# firewall-cmd --reload -success -# setsebool -P ftpd_full_access on -``` - -## File Transmission - -### Overview -This section describes how to transfer files after the vsftpd service is started. - -### Connecting to the Server -**Command Format** - -**ftp** \[_hostname_ | _ip-address_\] - -**hostname** indicates the name of the server, and **ip-address** indicates the IP address of the server. - -**Requirements** - -Run the following command on the command-line interface \(CLI\) of the openEuler OS: - -``` -$ ftp ip-address -``` - -Enter the user name and password as prompted. If the following information is displayed after the authentication is successful, the FTP connection is successful. In this case, you have accessed the directory of the connected server. - -``` -ftp> -``` - -At this prompt, you can enter different commands to perform related operations. - -- Display the current path of the server. - - ``` - ftp>pwd - ``` - -- Display the local path. You can upload the files in this path to the corresponding location on the FTP server. - - ``` - ftp>lcd - ``` - -- Exit the current window and return to the local Linux terminal. - - ``` - ftp>! - ``` - - -### Downloading a File -Generally, the get or mget command is used to download files. - -**How to use get** - -- Function description: Transfers files from a remote host to a local host. -- Command format: **get** \[_remote-file_\] \[_local-file_\] - - _remote-file_ indicates a remote file, and _local-file_ indicates a local file. - -- For example, run the following command to obtain the /home/openEuler/openEuler.htm file on the remote server to the local directory /home/myopenEuler/ and change the file name to myopenEuler.htm - - ``` - ftp> get /home/openEuler/openEuler.htm /home/myopenEuler/myopenEuler.htm - ``` - - -**How to use mget** - -- Function description: Receives a batch of files from the remote host to the local host. 
-- Command format: **mget** \[_remote-file_\] - - _remote-file_ indicates a remote file. - -- For example, to obtain all files in the /home/openEuler/ directory on the server, run the following command: - - ``` - ftp> cd /home/openEuler/ - ftp> mget *.* - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- In this case, a message is displayed each time a file is downloaded. To block the prompt information, run the **prompt off** command before running the **mget \*.\*** command. - >- The files are downloaded to the current directory on the Linux host. For example, if you run the ftp command in /home/myopenEuler/, all files are downloaded to /home/myopenEuler/. - - -### Uploading a file -Generally, the put or mput command is used to upload files. - -**How to use put** - -- Function: Transfers a local file to a remote host. -- Command format: **put** \[_local-file_\] \[_remote-file_\] - - _remote-file_ indicates a remote file, and _local-file_ indicates a local file. - -- For example, run the following command to transfer the local myopenEuler.htm file to the remote host /home/openEuler/ and change the file name to openEuler.htm: - - ``` - ftp> put myopenEuler.htm /home/openEuler/openEuler.htm - ``` - - -**How to use mput** - -- Function: Transfers a batch of files from the local host to a remote host. -- Command format: **mput** \[_local-file_\] - - _local-file_ indicates a local file. - -- For example, run the following command to upload all HTM files in the local directory to the /home/openEuler/ directory on the server: - - ``` - ftp> cd /home/openEuler/ - ftp> mput *.htm - ``` - - -### Deleting a File -Generally, the **delete** or **mdelete** command is used to delete a file. - -**How to use delete** - -- Function description: Deletes one or more files from the remote server. -- Command format: **delete** \[_remote-file_\] - - _remote-file_ indicates a remote file. 
- -- For example, to delete the /home/openEuler/openEuler.htm from the remote server, run the following command: - - ``` - ftp> cd /home/openEuler/ - ftp> delete openEuler.htm - ``` - - -**How to use mdelete** - -- Function description: Deletes files from a remote server. This function is used to delete files in batches. -- Command format: **mdelete** \[_remote-file_\] - - _remote-file_ indicates a remote file. - -- For example, to delete all files whose names start with **a** from the /home/openEuler/ directory on the remote server, run the following command: - - ``` - ftp> cd /home/openEuler/ - ftp> mdelete a* - ``` - - -### Disconnecting from the Server -Run the bye command to disconnect from the server. - -``` -ftp> bye -``` diff --git a/docs/en/docs/Administration/configuring-the-network.md b/docs/en/docs/Administration/configuring-the-network.md deleted file mode 100644 index ce912c33916557a0952eb47256ce8115622d8af2..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/configuring-the-network.md +++ /dev/null @@ -1,1335 +0,0 @@ -# Configuring the Network - - -- [Configuring the Network](#configuring-the-network) - - [Configuring an IP Address](#configuring-an-ip-address) - - [Using the nmcli Command](#using-the-nmcli-command) - - [Using the ip Command](#using-the-ip-command) - - [Configuring the Network Through the ifcfg File](#configuring-the-network-through-the-ifcfg-file) - - [Configuring a Host Name](#configuring-a-host-name) - - [Introduction](#introduction) - - [Configuring a Host Name by Running the **hostnamectl** Command](#configuring-a-host-name-by-running-the-hostnamectl-command) - - [Configuring a Host Name by Running the nmcli Command](#configuring-a-host-name-by-running-the-nmcli-command) - - [Configuring Network Bonding](#configuring-network-bonding) - - [Running the nmcli Command](#running-the-nmcli-command) - - [Configuring Network Bonding by Using a Command Line](#configuring-network-bonding-by-using-a-command-line) - - 
[IPv6 Differences \(vs IPv4\)](#ipv6-differences-vs-ipv4) - - [Restrictions](#restrictions) - - [Configuration Description](#configuration-description) - - [FAQ](#faq) - - - - -## Configuring an IP Address - - - -### Using the nmcli Command - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The network configuration configured by running the **nmcli** command takes effect immediately and will not be lost after the system restarts. - - -#### Introduction to nmcli - -**nmcli** \(NetworkManager Command Line Interface\) is the command-line utility to configure networking through NetworkManager. The basic format of using **nmcli** is as follows: - -``` - nmcli [OPTIONS] OBJECT { COMMAND | help } -``` - -In the preceding command, **OBJECT** can be one of the following options: **general**, **networking**, **radio**, **connection**, and **device**. **OPTIONS** can be optional options, such as **-t**, **\-\-terse** \(for script processing\),**-p**, **\-\-pretty** \(for human-readable output\), **-h**, and **\-\-help**. For more information, run the **nmcli help** command. 
- -``` -$ nmcli help -``` - -Common commands are listed as follows: - -- To display the general status of NetworkManager, run the following command: - - ``` - $ nmcli general status - ``` - -- To display all connections, run the following command: - - ``` - $ nmcli connection show - ``` - -- To display the current active connections only, add the **-a** or **\-\-active** option as follows: - - ``` - $ nmcli connection show --active - ``` - -- To display the device identified by NetworkManager and its connection status, run the following command: - - ``` - $ nmcli device status - ``` - -- To start or stop network interfaces, for example, run the nmcli commands as the **root** user: - - ``` - # nmcli connection up id enp3s0 - # nmcli device disconnect enp3s0 - ``` - - -#### Device Management - -##### Connecting to a Device - -Run the following command to connect NetworkManager to the corresponding network device. Try to find the proper connection configuration and activate it. - - ``` - $nmcli device connect "$IFNAME" - ``` - -> If the corresponding connection configuration does not exist, NetworkManager creates and activates a configuration file with default settings. - -##### Disconnecting to a Device - -Run the following command to disconnect NetworkManager with the network device and prevent the device from being automatically activated. - - ``` - $nmcli device disconnect "$IFNAME" - ``` - -#### Setting Network Connections - -Run the following command to display all the available network connections: - -``` -$ nmcli con show - - -NAME UUID TYPE DEVICE -enp4s0 5afce939-400e-42fd-91ee-55ff5b65deab ethernet enp4s0 -enp3s0 c88d7b69-f529-35ca-81ab-aa729ac542fd ethernet enp3s0 -virbr0 ba552da6-f014-49e3-91fa-ec9c388864fa bridge virbr0 -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->In the command output, **NAME** indicates the connection ID \(name\). 
- -After a network connection is added, the corresponding configuration file is generated and associated with the corresponding device. To check for available devices, run the following command: - -``` -$ nmcli dev status - -DEVICE TYPE STATE CONNECTION -enp3s0 ethernet connected enp3s0 -enp4s0 ethernet connected enp4s0 -virbr0 bridge connected virbr0 -lo loopback unmanaged -- -virbr0-nic tun unmanaged -- -``` - - - -##### Configuring Dynamic IP Connections - -###### Configuring IP Addresses -When DHCP is used to allocate a network, run the following command to add a network configuration file: - -``` -nmcli connection add type ethernet con-name connection-name ifname interface-name -``` - -For example, to create a dynamic connection configuration file named **net-test**, run the following command as the **root** user: - -``` -# nmcli connection add type ethernet con-name net-test ifname enp3s0 -Connection 'net-test' (a771baa0-5064-4296-ac40-5dc8973967ab) successfully added. -``` - -The NetworkManager sets **connection.autoconnect** to **yes** and saves the setting to the **/etc/sysconfig/network-scripts/ifcfg-net-test** file. In the **/etc/sysconfig/network-scripts/ifcfg-net-test** file, **ONBOOT** is set to **yes**. 
- -###### Activating a Connection and Checking Device Connection Status -Run the following command as the **root** user to activate a network connection: - -``` -# nmcli con up net-test -Connection successfully activated (D-Bus active path:/org/freedesktop/NetworkManager/ActiveConnection/5) -``` - -Run the following command to check the connection status of devices: - -``` -$ nmcli device status - -DEVICE TYPE STATE CONNECTION -enp4s0 ethernet connected enp4s0 -enp3s0 ethernet connected net-test -virbr0 bridge connected virbr0 -lo loopback unmanaged -- -virbr0-nic tun unmanaged -- -``` - -##### Configuring Static IP Connections - -###### Configuring IP Addresses -To add a static IPv4 network connection, run the following command: - -``` -nmcli connection add type ethernet con-name connection-name ifname interface-name ip4 address gw4 address -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->To add an IPv6 address and related gateway information, use the **ip6** and **gw6** options. - -For example, to create a static connection configuration file named **net-static**, run the following command as the **root** user: - -``` -# nmcli con add type ethernet con-name net-static ifname enp3s0 ip4 192.168.0.10/24 gw4 192.168.0.254 -``` - -You can also specify the IPv6 address and gateway for the device. The following is an example: - -``` -# nmcli con add type ethernet con-name test-lab ifname enp3s0 ip4 192.168.0.10/24 gw4 192.168.0.254 ip6 abbe::**** gw6 2001:***::* -Connection 'net-static' (63aa2036-8665-f54d-9a92-c3035bad03f7) successfully added. -``` - -The NetworkManager sets the internal parameter **ipv4.method** to **manual**, **connection.autoconnect** to **yes**, and writes the setting to the **/etc/sysconfig/network-scripts/ifcfg-my-office** file. In the file, **BOOTPROTO** is set to **none**, and **ONBOOT** is set to **yes**. 
- -Run the following command as the **root** user to set IPv4 addresses of two DNS servers: - -``` -# nmcli con mod net-static ipv4.dns "*.*.*.* *.*.*.*" -``` - -Run the following command as the **root** user to set IPv6 addresses of two DNS servers: - -``` -# nmcli con mod net-static ipv6.dns "2001:4860:4860::**** 2001:4860:4860::****" -``` - -###### Activating a Connection and Checking Device Connection Status -Run the following command as the **root** user to activate a network connection: - -``` -# nmcli con up net-static ifname enp3s0 -Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/6) -``` - -Run the following command to check the connection status of devices: - -``` -$ nmcli device status - -DEVICE TYPE STATE CONNECTION -enp4s0 ethernet connected enp4s0 -enp3s0 ethernet connected net-static -virbr0 bridge connected virbr0 -lo loopback unmanaged -- -virbr0-nic tun unmanaged -- -``` - -Run the following command to view the connection details \(with the **-p** and **\-\-pretty** options to add the title and segment to the output\): - -``` -$ nmcli -p con show net-static -=============================================================================== -Connection profile details (net-static ) -=============================================================================== -connection.id: net-static -connection.uuid: b9f18801-6084-4aee-af28-c8f0598ff5e1 -connection.stable-id: -- -connection.type: 802-3-ethernet -connection.interface-name: enp3s0 -connection.autoconnect: yes -connection.autoconnect-priority: 0 -connection.autoconnect-retries: -1 (default) -connection.multi-connect: 0 (default) -connection.auth-retries: -1 -connection.timestamp: 1578988781 -connection.read-only: no -connection.permissions: -- -connection.zone: -- -connection.master: -- -connection.slave-type: -- -connection.autoconnect-slaves: -1 (default) -connection.secondaries: -- -connection.gateway-ping-timeout: 0 -connection.metered: 
unknown -connection.lldp: default -connection.mdns: -1 (default) -connection.llmnr: -1 (default) -``` - -##### Adding a Wi-Fi Connection - -You can add the Wi-Fi connection using either of the following methods: - -**Method 1: Connect to the Wi-Fi network using a network port.** - -Connect to the Wi-Fi network specified by the SSID or BSSID. Run the following command to find a matching connection or create a connection, and then activate the connection on the device. - -``` -$ nmcli device wifi connect "$SSID" password "$PASSWORD" ifname "$IFNAME" -$ nmcli --ask device wifi connect "$SSID" -``` -**Method 2: Connect to the Wi-Fi network using the configuration file.** - -1,Run the following command to check for available Wi-Fi access points: - -``` -$ nmcli dev wifi list -``` - -2,Run the following command to generate a static IP address configuration that allows Wi-Fi connections automatically allocated by the DNS: - -``` -$ nmcli con add con-name Wifi ifname wlan0 type wifi ssid MyWifi ip4 192.168.100.101/24 gw4 192.168.100.1 -``` - -3,Run the following command to set a WPA2 password, for example, **answer**: - -``` -$ nmcli con modify Wifi wifi-sec.key-mgmt wpa-psk -$ nmcli con modify Wifi wifi-sec.psk answer -``` - -4,Run the following command to change the Wi-Fi status: - -``` -$ nmcli radio wifi [ on | off ] -``` - -##### Modifying Attributes - -Run the following command to check a specific attribute, for example, mtu: - -``` -$ nmcli connection show id 'Wifi ' | grep mtu -802-11-wireless.mtu: auto -``` - -Run the following command to modify the attribute: - -``` -$ nmcli connection modify id 'Wifi ' 802-11-wireless.mtu 1350 -``` - -Run the following command to confirm the modification: - -``` -$ nmcli connection show id 'Wifi ' | grep mtu -802-11-wireless.mtu: 1350 -``` - -#### Configuring a Static Route - -- Run the nmcli command to configure a static route for a network connection: - - ``` - $ nmcli connection modify enp3s0 +ipv4.routes "192.168.122.0/24 
10.10.10.1" - ``` - - -- Run the following command to configure the static route using the editor: - - ``` - $ nmcli con edit type ethernet con-name enp3s0 - ===| nmcli interactive connection editor |=== - Adding a new '802-3-ethernet' connection - Type 'help' or '?' for available commands. - Type 'describe [.]' for detailed property description. - You may edit the following settings: connection, 802-3-ethernet (ethernet), 802-1x, ipv4, ipv6, dcb - nmcli> set ipv4.routes 192.168.122.0/24 10.10.10.1 - nmcli> - nmcli> save persistent - Saving the connection with 'autoconnect=yes'. That might result in an immediate activation of the connection. - Do you still want to save? [yes] yes - Connection 'enp3s0' (1464ddb4-102a-4e79-874a-0a42e15cc3c0) successfully saved. - nmcli> quit - ``` - - -### Using the ip Command - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The network configuration configured using the **ip** command takes effect immediately, but the configuration will be lost after the system restarts. - - - -#### Configuring IP Addresses - -Run the **ip** command to configure an IP address for the interface. The command format is as follows, where _interface-name_ indicates the NIC name. 
- -``` -ip addr [ add | del ] address dev interface-name -``` - -##### Configuring a Static IP Address -Run the following command as the **root** user to configure an IP address: - -``` -# ip address add 192.168.0.10/24 dev enp3s0 -``` - -Run the following command as the **root** user to view the configuration result: - -``` -# ip addr show dev enp3s0 -2: enp3s0: mtu 1500 qdisc fq_codel state UP group default qlen 1000 - link/ether 52:54:00:aa:ad:4a brd ff:ff:ff:ff:ff:ff - inet 192.168.202.248/16 brd 192.168.255.255 scope global dynamic noprefixroute enp3s0 - valid_lft 9547sec preferred_lft 9547sec - inet 192.168.0.10/24 scope global enp3s0 - valid_lft forever preferred_lft forever - inet6 fe80::32e8:cc22:9db2:f4d4/64 scope link noprefixroute - valid_lft forever preferred_lft forever -``` - -##### Configuring Multiple IP Addresses -The **ip** command can be used to assign multiple IP addresses to an interface. You can run the **ip** command multiple times as the **root** user to assign IP addresses to an interface. The following is an example: - -``` -# ip address add 192.168.2.223/24 dev enp4s0 -# ip address add 192.168.4.223/24 dev enp4s0 -# ip addr - -3: enp4s0: mtu 1500 qdisc fq_codel state UP group default qlen 1000 - link/ether 52:54:00:aa:da:e2 brd ff:ff:ff:ff:ff:ff - inet 192.168.203.12/16 brd 192.168.255.255 scope global dynamic noprefixroute enp4s0 - valid_lft 8389sec preferred_lft 8389sec - inet 192.168.2.223/24 scope global enp4s0 - valid_lft forever preferred_lft forever - inet 192.168.4.223/24 scope global enp4s0 - valid_lft forever preferred_lft forever - inet6 fe80::1eef:5e24:4b67:f07f/64 scope link noprefixroute - valid_lft forever preferred_lft forever -``` - -#### Configuring a Static Route - -To add a static route to the routing table, run the **ip route add** command. To delete a route, run the **ip route del** command. 
The following shows the common format of the **ip route** command:
-
-```
-ip route [ add | del | change | append | replace ] destination-address
-```
-
-To display the current IP routing table, run the **ip route** command as the **root** user. The following is an example:
-
-```
-# ip route
-
-default via 192.168.0.1 dev enp3s0 proto dhcp metric 100
-default via 192.168.0.1 dev enp4s0 proto dhcp metric 101
-192.168.0.0/16 dev enp3s0 proto kernel scope link src 192.168.202.248 metric 100
-192.168.0.0/16 dev enp4s0 proto kernel scope link src 192.168.203.12 metric 101
-192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
-```
-
-To add a static route to the host address, run the following command as the **root** user:
-
-```
-ip route add 192.168.2.1 via 10.0.0.1 [dev interface-name]
-```
-
-In the preceding command, **192.168.2.1** is the IP address in the dot-decimal notation, **10.0.0.1** is the next hop, and _interface-name_ is the exit interface for entering the next hop.
-
-To add a static route to the network, that is, an IP address that represents an IP address range, run the following command as the **root** user:
-
-```
-ip route add 192.168.2.0/24 via 10.0.0.1 [dev interface-name]
-```
-
-In the preceding command, **192.168.2.0/24** is the address of the target network \(the network address with its prefix length\), **10.0.0.1** is the next hop, and _interface-name_ is the NIC name.
-
-### Configuring the Network Through the ifcfg File
-
->![](./public_sys-resources/icon-note.gif) **NOTE:**
->The network configured in the **ifcfg** file does not take effect immediately. You need to run the **systemctl reload NetworkManager** command as the **root** user to restart the network service for the configuration to take effect.
-
-#### Configuring a Static Network
-The following uses the **enp4s0** network interface as an example to describe how to configure a static network by modifying the **ifcfg** file as the **root** user. 
The **ifcfg-enp4s0** file is generated in the **/etc/sysconfig/network-scripts/** directory. Modify the following parameters in the file:
-
-```
-TYPE=Ethernet
-PROXY_METHOD=none
-BROWSER_ONLY=no
-BOOTPROTO=none
-IPADDR=192.168.0.10
-PREFIX=24
-DEFROUTE=yes
-IPV4_FAILURE_FATAL=no
-IPV6INIT=yes
-IPV6_AUTOCONF=yes
-IPV6_DEFROUTE=yes
-IPV6_FAILURE_FATAL=no
-IPV6_ADDR_GEN_MODE=stable-privacy
-NAME=enp4s0static
-UUID=08c3a30e-c5e2-4d7b-831f-26c3cdc29293
-DEVICE=enp4s0
-ONBOOT=yes
-```
-
-#### Configuring a Dynamic Network
-The following uses the **em1** network interface as an example to describe how to configure a dynamic network by modifying the **ifcfg** file. The **ifcfg-em1** file is generated in the **/etc/sysconfig/network-scripts/** directory. Modify the following parameters in the file:
-
-```
-DEVICE=em1
-BOOTPROTO=dhcp
-ONBOOT=yes
-```
-
-To configure an interface to send different host names to the DHCP server, add the following content to the **ifcfg** file:
-
-```
-DHCP_HOSTNAME=hostname
-```
-
-To configure an interface to ignore the routes sent by the DHCP server to prevent network services from updating the /etc/resolv.conf file using the DNS server received from the DHCP server, add the following content to the **ifcfg** file:
-
-```
-PEERDNS=no
-```
-
-To configure an interface to use a specific DNS server, set the **PEERDNS** parameter to **no** and add the following content to the **ifcfg** file:
-
-```
-DNS1=ip-address
-DNS2=ip-address
-```
-
-**ip-address** is the IP address of the DNS server. This allows the network service to update the **/etc/resolv.conf** file using the specified DNS server.
-
-#### Default Gateway Configuration
-When determining the default gateway, the system parses the **/etc/sysconfig/network** file and then the **ifcfg** file, and uses the value of **GATEWAY** that is read last as the default route in the routing table.
-
-In a dynamic network environment, when the NetworkManager is used to manage hosts, you are advised to set the default gateway to DHCP assignment.
-
-## Configuring a Host Name
-
-
-
-### Introduction
-
-There are three types of host names: **static**, **transient**, and **pretty**.
-
-- **static**: Static host name, which can be set by users and saved in the **/etc/hostname** file.
-- **transient**: Dynamic host name, which is maintained by the kernel. The initial value is a static host name. The default value is **localhost**. The value can be changed when the DHCP or mDNS server is running.
-- **pretty**: Flexible host name, which can be set in any form \(including special characters/blanks\). Static and transient host names are subject to the general domain name restrictions.
-
->![](./public_sys-resources/icon-note.gif) **NOTE:**
->Static and transient host names can contain only letters \(a–z and A–Z\), digits \(0–9\), hyphens \(-\), underscores \(\_\), and periods \(.\). The host names cannot start or end with a period \(.\) or contain two consecutive periods \(.\). The host name can contain a maximum of 64 characters.
-
-### Configuring a Host Name by Running the **hostnamectl** Command
-
-#### Viewing All Host Names
-Run the following command to view the current host name:
-
-```
-$ hostnamectl status
-```
-
->![](./public_sys-resources/icon-note.gif) **NOTE:**
->If no option is specified in the command, the **status** option is used by default.
-
-#### Setting All Host Names
-Run the following command as the **root** user to set all host names:
-
-```
-# hostnamectl set-hostname name
-```
-
-#### Setting a Specific Host Name
-Run the following command as the **root** user to set a specific host name:
-
-```
-# hostnamectl set-hostname name [option...]
-```
-
-The option may be one or more of **\-\-pretty**, **\-\-static**, and **\-\-transient**.
- -If **\-\-static** or **\-\-transient** is used together with **\-\-pretty**, the host names of the **static** or **transient** type will be simplified to the host names of the **pretty** type with spaces replaced with hyphens \(-\) and special characters deleted. - -When setting a host name of the **pretty** type, use quotation marks if the host name contains spaces or single quotation marks. An example is as follows: - -``` -# hostnamectl set-hostname "Stephen's notebook" --pretty -``` - -#### Clearing a Specific Host Name -To clear a specific host name and restore it to the default format, run the following command as the **root** user: - -``` -# hostnamectl set-hostname "" [option...] -``` - -In the preceding command, **""** is a blank character string, and the _option_ may be one or more of **\-\-pretty**, **\-\-static**, and **\-\-transient**. - -#### Remotely Changing a Host Name -To change the host name in a remote system, run the **hostnamectl** command as the **root** user with the **-H** or **\-\-host** option. - -``` -# hostnamectl set-hostname -H [username]@hostname new_hostname -``` - -In the preceding command, _hostname_ indicates the name of the remote host to be configured, _username_ indicates the user-defined name, and *new\_hostname* indicates the new host name. **hostnamectl** is used to connect to the remote system through SSH. 
- -### Configuring a Host Name by Running the nmcli Command - -To query a static host name, run the following command: - -``` -$ nmcli general hostname -``` - -To name a static host as **host-server**, run the following command as **root** user: - -``` -# nmcli general hostname host-server -``` - -To enable the system to detect the change of the static host name, run the following command as the **root** user to restart the hostnamed service: - -``` -# systemctl restart systemd-hostnamed -``` - -## Configuring Network Bonding - - - -### Running the nmcli Command - -- To create a bond named **mybond0**, run the following command: - - ``` - $ nmcli con add type bond con-name mybond0 ifname mybond0 mode active-backup - ``` - -- To add a slave interface, run the following command: - - ``` - $ nmcli con add type bond-slave ifname enp3s0 master mybond0 - ``` - - To add another slave interface, repeat the preceding command with the new interface name: - - ``` - $ nmcli con add type bond-slave ifname enp4s0 master mybond0 - Connection 'bond-slave-enp4s0' (05e56afc-b953-41a9-b3f9-0791eb49f7d3) successfully added. - ``` - -- To enable a bond, run the following command to enable the slave interface first: - - ``` - $ nmcli con up bond-slave-enp3s0 - Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/14) - ``` - - ``` - $ nmcli con up bond-slave-enp4s0 - Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/15) - ``` - - Then, run the following command to enable the bond: - - ``` - $ nmcli con up mybond0 - Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/16) - ``` - - -### Configuring Network Bonding by Using a Command Line - -#### Checking Whether the Bonding Kernel Module Is Installed - -By default, the bonding kernel module is loaded. 
To load this module, run the following command as the **root** user: - -``` -# modprobe --first-time bonding -``` - -Run the following command as the **root** user to display the information about the module: - -``` -# modinfo bonding -``` - -For more commands, run the modprobe \-\-help command as the **root** user. - -#### Creating a Channel Bonding Interface - -To create a channel bonding interface, you can create a file named **ifcfg-bondN** in the **/etc/sysconfig/network-scripts/** directory as the **root** user \(replacing N with the actual interface number, for example, 0\). - -Write the corresponding content to the configuration file according to the type of the interface to be bonded, for example, network interface. An example of the interface configuration file is as follows: - -``` -DEVICE=bond0 -NAME=bond0 -TYPE=Bond -BONDING_MASTER=yes -IPADDR=192.168.1.1 -PREFIX=24 -ONBOOT=yes -BOOTPROTO=none -BONDING_OPTS="bonding parameters separated by spaces" -``` - -#### Creating a Slave Interface - -After creating a channel bonding interface, you must add the **MASTER** and **SLAVE** instructions to the configuration file of the slave interface. - -For example, to bind the two network interfaces enp3s0 and enp4s0 in channel mode, the configuration files are as follows: - -``` -TYPE=Ethernet -NAME=bond-slave-enp3s0 -UUID=3b7601d1-b373-4fdf-a996-9d267d1cac40 -DEVICE=enp3s0 -ONBOOT=yes -MASTER=bond0 -SLAVE=yes -``` - -``` -TYPE=Ethernet -NAME=bond-slave-enp4s0 -UUID=00f0482c-824f-478f-9479-abf947f01c4a -DEVICE=enp4s0 -ONBOOT=yes -MASTER=bond0 -SLAVE=yes -``` - -#### Activating Channel Bonding - -To activate channel bonding, you need to enable all the slave interfaces. 
Run the following command as the **root** user: - -``` -# ifup enp3s0 -Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/7) -``` - -``` -# ifup enp4s0 -Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/8) -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If an interface is in **up** state, run the **ifdown** _enp3s0_ command to change the state to **down**. In the command, _enp3s0_ indicates the actual NIC name. - -After that, enable all the slave interfaces to enable the bonding \(do not set them to **Down**\). - -To enable the NetworkManager to detect the modifications made by the system, run the following command as the **root** user after each modification: - -``` -# nmcli con load /etc/sysconfig/network-scripts/ifcfg-device -``` - -Run the following command as the **root** user to check the status of the bonded interface: - -``` -# ip link show - -1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 -2: enp3s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000 - link/ether 52:54:00:aa:ad:4a brd ff:ff:ff:ff:ff:ff -3: enp4s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000 - link/ether 52:54:00:aa:da:e2 brd ff:ff:ff:ff:ff:ff -4: virbr0: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000 - link/ether 86:a1:10:fb:ef:07 brd ff:ff:ff:ff:ff:ff -5: virbr0-nic: mtu 1500 qdisc fq_codel master virbr0 state DOWN mode DEFAULT group default qlen 1000 - link/ether 52:54:00:29:35:4c brd ff:ff:ff:ff:ff:ff -``` - -#### Creating Multiple Bondings - -The system creates a channel bonding interface for each bonding, including the **BONDING\_OPTS** instruction. This configuration method allows multiple bonded devices to use different configurations. 
Perform the following operations to create multiple channel bonding interfaces: - -- Create multiple **ifcfg-bondN** files that contain the **BONDING\_OPTS** instruction so that network scripts can create bonding interfaces as required. -- Create or edit the existing interface configuration file to be bonded, and add the **SLAVE** instruction. -- Use the MASTER instruction to assign the interface to be bonded, that is, the slave interface, to the channel bonding interface. - -The following is an example of the configuration file of a channel bonding interface: - -``` -DEVICE=bondN -NAME=bondN -TYPE=Bond -BONDING_MASTER=yes -IPADDR=192.168.1.1 -PREFIX=24 -ONBOOT=yes -BOOTPROTO=none -BONDING_OPTS="bonding parameters separated by spaces" -``` - -In this example, replace N with the number of the bonded interface. For example, to create two interfaces, you need to create two configuration files **ifcfg-bond0** and **ifcfg-bond1** with correct IP addresses. - -## IPv6 Differences \(vs IPv4\) - -### Restrictions - -- chrony supports global addresses but not link-local addresses. -- Firefox supports the access to the global address through HTTP or HTTPS, but does not support the access to the link-local address. - -### Configuration Description - - - -#### Setting the MTU of an Interface Device - -##### Overview -In an IPv6 scenario, the minimum MTU value of the entire routing path is used as the PMTU value of the current link. The source end determines whether to fragment packets based on the PMTU value. Other devices on the entire path do not need to fragment packets. This reduces the load of intermediate routing devices. The minimum value of IPv6 PMTU is 1280. - -##### Setting the MTU of the Interface Device -If the MTU of an interface configured with an IPv6 address is set to a value smaller than **1280** \(the minimum value of the IPv6 PMTU\), the IPv6 address of the interface will be deleted and cannot be added again. 
Therefore, in IPv6 scenarios, the MTU of the interface device must be greater than or equal to 1280. Run the following commands as the **root** user to view the details: - -``` -# ip addr show enp3s0 -3: enp3s0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 52:54:00:62:xx:xx brd ff:ff:ff:ff:xx:xx - inet 10.41.125.236/16 brd 10.41.255.255 scope global noprefixroute dynamic enp3s0 - valid_lft 38663sec preferred_lft 38663sec - inet6 2001:222::2/64 scope global - valid_lft forever preferred_lft forever -``` - -``` -# ip link set dev enp3s0 mtu 1200 -# ip addr show enp3s0 -3: enp3s0: mtu 1200 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 52:54:00:62:xx:xx brd ff:ff:ff:ff:xx:xx - inet 10.41.125.236/16 brd 10.41.255.255 scope global noprefixroute dynamic enp3s0 - valid_lft 38642sec preferred_lft 38642sec -``` - -``` -# ip addr add 2001:222::2/64 dev enp3s0 -RTNETLINK answers: No buffer space available -``` - -``` -# ip link set dev enp3s0 mtu 1500 -# ip addr show enp3s0 -3: enp3s0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 52:54:00:62:xx:xx brd ff:ff:ff:ff:xx:xx - inet 10.41.125.236/16 brd 10.41.255.255 scope global noprefixroute dynamic enp3s0 - valid_lft 38538sec preferred_lft 38538sec -``` - -``` -# ip addr add 2001:222::2/64 dev enp3s0 -# ip addr show enp3s0 -3: enp3s0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 52:54:00:62:xx:xx brd ff:ff:ff:ff:xx:xx - inet 10.41.125.236/16 brd 10.41.255.255 scope global noprefixroute dynamic enp3s0 - valid_lft 38531sec preferred_lft 38531sec - inet6 2001:222::2/64 scope global - valid_lft forever preferred_lft forever -``` - -#### Stateful IPv6 Address Autoconfiguration - -##### Overview -Both IPv6 and IPv4 addresses can be obtained through DHCP as the **root** user. There are configuration methods for IPv6 address: stateless autoconfiguration and stateful autoconfiguration. 
- -- Stateless autoconfiguration - - The DHCP server is not required for management. The device obtains the network prefix according to the router advertisement \(RA\), or the prefix of a link-local address is fixed to fe80::. The interface ID is automatically obtained based on the value of IPV6\_ADDR\_GEN\_MODE in the ifcfg file. - - 1. If the value of IPv6\_ADDR\_GEN\_MODE is stable-privacy, the device determines a random interface ID based on the device and network environment. - 2. If the value of IPv6\_ADDR\_GEN\_MODE is EUI64, the device determines the interface ID based on the device MAC address. - -- Stateful autoconfiguration: The DHCP server manages and leases IPv6 addresses from the DHCPv6 server base on the DHCPv6 protocol. - - In stateful autoconfiguration, the DHCPv6 server can classify clients based on the vendor class configured on the clients and assign IPv6 addresses in different address segments to different types of clients. In IPv4 scenarios, the client can use the -V option of the dhclient command to set the vendor-class-identifier field. The DHCP server classifies clients based on the vendor-class-identifier field in the configuration file. In IPv6 scenarios, if the same method is used to classify clients, the classification does not take effect. - - ``` - dhclient -6 -V - ``` - - This is because DHCPv6 differs greatly from DHCP. The vendor-class-option in DHCPv6 replaces the vendor-class-identifier in DHCP. However, the -V option of dhclient cannot be set to vendor-class-option. - - -##### Setting the vendor class for dhclient in Stateful IPv6 Address Autoconfiguration -- On the client, add the setting of vendor class by using the configuration file. - - Client configuration file \(/etc/dhcp/dhclient6.conf\): The file location can be customized. You need to specify the configuration file using the dhclient -cf option. 
- - ``` - option dhcp6.vendor-class code 16 = {integer 32, integer 16, string}; - interface "enp3s0" { - send dhcp6.vendor-class ; - } - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- \: a 32-digit integer, indicating the enterprise ID. The enterprise is registered through the IANA. - >- \: a 16-digit integer, indicating the length of the vendor class string. - >- \: character string of the vendor class to be set, for example, HWHW. - - On the client: - - ``` - dhclient -6 -cf /etc/dhcp/dhclient6.conf - ``` - - -- The DHCPv6 server configuration file \(/etc/dhcp/dhcpd6.conf\) needs to be specified by the dhcpd -cf option. - - ``` - option dhcp6.vendor-class code 16 = {integer 32, integer 16, string}; - subnet6 fc00:4:12:ffff::/64 { - class "hw" { - match if substring ( option dhcp6.vendor-class, 6, 10 ) = "HWHW"; - } - pool6 { - allow members of "hw"; - range6 fc00:4:12:ffff::ff10 fc00:4:12:ffff::ff20; - } - pool6 { - allow unknown clients; - range6 fc00:4:12:ffff::100 fc00:4:12:ffff::120; - } - } - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >In substring \(option dhcp6.vendor-class, 6, 10\), the start position of the substring is 6, because the substring contains four bytes of and two bytes of . The end position of the substring is 6+. In this example, the vendor class string is HWHW, and the length of the string is 4. Therefore, the end position of the substring is 6 + 4 = 10. You can specify and as required. - - On the server: - - ``` - dhcpd -6 -cf /etc/dhcp/dhcpd6.conf - ``` - - -#### Kernel Supporting Socket-Related System Calls - -##### Overview -The length of an IPv6 address is extended to 128 bits, indicating that there are sufficient IPv6 addresses for allocation. Compared with the IPv4 header, the IPv6 header is simplified, and the IPv6 automatic configuration function is enhanced. IPv6 addresses are classified into unicast addresses, multicast addresses, and anycast addresses. 
Common unicast addresses include link-local addresses, unique local addresses, and global addresses. As there are sufficient global IPv6 addresses, unique local addresses are not used. \(formerly known as site-local addresses, which were discarded in 2004.\) Currently, the mainstream unicast addresses are link-local address and global address. The current kernel supports socket system invoking. The link-local address and global address using unicast addresses are different. - -##### Differences Between the link-local Address and global Address During Socket Invoking -RFC 2553: Basic Socket Interface Extensions for IPv6 defines the sockaddr\_in6 data structure as follows: - -``` -struct sockaddr_in6 { - uint8_t sin6_len; /* length of this struct */ - sa_family_t sin6_family; /* AF_INET6 */ - in_port_t sin6_port; /* transport layer port # */ - uint32_t sin6_flowinfo; /* IPv6 flow information */ - struct in6_addr sin6_addr; /* IPv6 address */ - uint32_t sin6_scope_id; /* set of interfaces for a scope */ -}; -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->sin6\_scope\_id: a 32-bit integer. For the link-local address, it identifies the index of the specified interface. For the link-range sin6\_addr, it identifies the index of the specified interface. For the site-range sin6\_addr, it is used as the site identifier \(the site-local address has been discarded\). - -When the link-local address is used for socket communication, the interface index corresponding to the address needs to be specified when the destination address is constructed. Generally, you can use the if\_nametoindex function to convert an interface name into an interface index number. 
Details are as follows: - -``` -int port = 1234; -int sk_fd; -int iff_index = 0; -char iff_name[100] = "enp3s0"; -char * ll_addr[100] = "fe80::123:456:789"; -struct sockaddr_in6 server_addr; - -memset(&server_addr,0,sizeof(structsockaddr_in6)); -iff_index=if_nametoindex(iff_name); - -server_addr.sin6_family=AF_INET6; -server_addr.sin6_port=htons(port); -server_addr.sin6_scope_id=iff_index; -inet_pton(AF_INET6, ll_addr, &(server_addr.sin6_addr)); - -sk_fd=socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP); -connect(sk_fd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in6)); -``` - -#### Persistency Configuration of the IPv4 dhclient Daemon Process - -##### Overview -When the NetworkManager service is used to manage network services, if the ifcfg- configuration file of an interface is configured to obtain an IP address in DHCP mode, the NetworkManager service starts the dhclient daemon process to obtain an IP address from the DHCP server. - -The dhclient provides the -1 option to determine whether the dhclient process persistently attempts to request an IP address or exits after the request times out before receiving a response from the DHCP server. For the IPv4 dhclient daemon process, you can set PERSISTENT\_DHCLIENT in the ifcfg- configuration file to determine whether to set the persistence of the IPv4 dhclient process. - -##### Restrictions -1. If the ongoing dhclient process is killed, the network service cannot automatically start it. Therefore, you need to ensure the reliability. -2. If PERSISTENT\_DHCLIENT is configured, ensure that the corresponding DHCP server exists. If no DHCP server is available when the network service is started and the dhclient process continuously attempts to send request packets but does not receive any response, the network service is suspended until the network service times out. The network service starts the IPv4 dhclient processes of multiple NICs in serial mode. 
If persistency is configured for a NIC but the DHCP server is not ready, the network service will be suspended when obtaining an IPv4 address for the NIC. As a result, the NIC cannot obtain an IPv4 or IPv6 address. - -The preceding restrictions apply to special scenarios. You need to ensure reliability. - -##### Configuration Differences Between IPv4 DHCP and IPv6 DHCPv6 -You can configure the ifcfg- parameter on an interface to enable IPv4 and IPv6 to dynamically obtain IP addresses using DHCP or DHCPv6. The configuration is as follows: - -``` -BOOTPROTO=none|bootp|dhcp -DHCPV6C=yes|no -PERSISTENT_DHCLIENT=yes|no|1|0 -``` - -- BOOTPROTO: **none** indicates that an IPv4 address is statically configured. bootp|dhcp enables DHCP dhclient to dynamically obtain an IPv4 address. -- DHCPV6C: **no** indicates that an IPv6 address is statically configured, and **yes** indicates that the DHCPv6 dhclient is enabled to dynamically obtain the IPv6 address. -- PERSISTENT\_DHCLIENT: **no|0** indicates that the IPv4 dhclient process is configured as nonpersistent. If the dhclient sends a request packet to the DHCP server but does not receive any response, the dhclient exits after a period of time and the exit value is 2. **yes|1** indicates that the IPv4 dhclient process is configured to be persistent. The dhclient process repeatedly sends request packets to the DHCP server. **If PERSISTENT\_DHCLIENT is not configured, dhclient of IPv4 is set to yes|1 by default.** - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The PERSISTENT\_DHCLIENT configuration takes effect only for IPv4 and does not take effect for IPv6-related dhclient -6 processes. By default, the persistence configuration is not performed for IPv6. - - -#### Differences Between IPv4 and IPv6 Configuration Using the iproute Command - -##### Overview -IPv4 and IPv6 are two different protocol standards. Therefore, the iproute commands are different in usage. 
This section describes the differences between IPv4 and IPv6 commands in the iproute package. - -To run the iproute commands, you must have the root permission. - -##### Lifecycle of an IPv6 Address - - - - - - - - - - - - - - - - - - -

IPv6 status

-

Description

-

tentative

-

Temporary state: The newly added address is still in the DAD process.

-

preferred

-

Preferred state: The DAD process is complete and no NA packet was received, indicating that the address does not conflict.

-

deprecated

-

Deprecated state: An address has a validity period (valid_lft or preferred_lft). After preferred_lft expires, the address changes to the deprecated state.

-

An address in this state cannot be used to create a new connection, but existing connections can still be used.

-

invalid

-

Invalid state: If the lease renewal fails after the preferred_lft time expires, the address status is set to invalid after the valid_lft time expires, indicating that the address cannot be used again.

-
- -Remarks: - -- preferred\_lft: preferred lifetime. The preferred\_lft address has not expired and can be used for normal communication. If there are multiple preferred addresses, the address is selected based on the kernel mechanism. -- valid\_lft: valid lifetime. The address cannot be used for creating new connections within the period of \[preferred\_lft, valid\_lft\]. The existing connections are still valid. - -##### ip link Command -The commands are as follows: - -``` -ip link set IFNAME mtu MTU -``` - -The minimum PMTU of IPv6 is 1280. If the MTU is set to a value smaller than 1280, IPv6 addresses will be lost. Other devices cannot ping the IPv6 address. - -##### ip addr Command -1. The commands are as follows: - - ``` - ip [-6] addr add IFADDR dev IFNAME - ``` - - You can choose to add the -6 option or not to add the IPv6 address. The ip addr command determines whether the address is an IPv4 address or an IPv6 address based on the address type. - - If the -6 option is specified but IFADDR is an IPv4 address, an error message is returned. - -2. The commands are as follows: - - ``` - ip [-6] addr add IFADDR dev IFNAME [home|nodad] - ``` - - \[home|nodad\] is valid only for IPv6 addresses. - - - home: specifies the home address defined in RFC 6275. \(This address is obtained by the mobile node from the home link, and is a permanent address of the mobile node. If the mobile node remains in the same home link, communication between various entities is performed normally.\) - - nodad: indicates that DAD is not performed when this IPv6 address is added. \(RFC 4862\) If multiple interfaces on a device are configured with the same IPv6 address through nodad, the IPv6 address is used in the interface sequence. An IPv6 address with both nodad and non-nodad cannot be added the same interface because the two IP addresses are the same. Otherwise, the message "RTNETLINK answers: File exists" is displayed. - -3. 
The commands are as follows: - - ``` - ip [-6] addr del IFADDR dev IFNAME - ``` - - You can choose to add the -6 option or not to delete an IPv6 address. The ip addr del command determines whether an IPv4 address or an IPv6 address is used based on the address type. - -4. The commands are as follows: - - ``` - ip [-6] addr show dev IFNAME [tentative|-tentative|deprecated|-deprecated|dadfailed|-dadfailed|temporary] - ``` - - - If the -6 option is not specified, both IPv4 and IPv6 addresses are displayed. If the -6 option is specified, only IPv6 addresses are displayed. - - \[tentative|-tentative|deprecated|-deprecated|dadfailed|-dadfailed|temporary\]. These options are only for IPv6. You can filter and view addresses based on the IPv6 address status. - 1. tentative: \(only for IPv6\) lists only the addresses that have not passed duplicate address detection \(DAD\). - 2. -tentative: \(only for IPv6\) lists only the addresses that are not in the DAD process. - 3. deprecated: \(only for IPv6\) lists only the deprecated addresses. - 4. -deprecated: \(only for IPv6\) lists only the addresses that are not deprecated. - 5. dadfailed: \(only for IPv6\) lists only the addresses that fail the DAD. - 6. -dadfailed: \(only for IPv6\) lists only the addresses that do not encounter DAD failures. - 7. temporary: \(only for IPv6\) lists only the temporary addresses. - - - -##### ip route Command -1. The commands are as follows: - - ``` - ip [-6] route add ROUTE [mtu lock MTU] - ``` - - - -6 option: You can add the -6 option or not when adding an IPv6 route. The ip route command determines whether an IPv4 or IPv6 address is used based on the address type. - - - mtu lock MTU: specifies the MTU of the locked route. If the MTU is not locked, the MTU value may be changed by the kernel during the PMTUD process. If the MTU is locked, PMTUD is not attempted. All IPv4 packets are not set with the DF bit and IPv6 packets are segmented based on the MTU. - -2. 
The commands are as follows: - - ``` - ip [-6] route del ROUTE - ``` - - You can choose whether to add the -6 option when deleting an IPv6 route. The ip route command determines whether an IPv4 address or an IPv6 address is used based on the address type. - - -##### ip rule command -1. The commands are as follows: - - ``` - ip [-6] rule list - ``` - - -6 option: If the -6 option is set, IPv6 policy-based routes are printed. If the -6 option is not set, IPv4 policy-based routes are printed. Therefore, you need to configure the -6 option according to the specific protocol type. - -2. The commands are as follows: - - ``` - ip [-6] rule [add|del] [from|to] ADDR table TABLE pref PREF - ``` - - -6 option: IPv6-related policy routing entries need to be configured with the -6 option. Otherwise, the error message "Error: Invalid source address." is displayed. Accordingly, the -6 option cannot be set for IPv4-related policy routing entries. Otherwise, the error message "Error: Invalid source address." is displayed. - - -#### Configuration Differences of the NetworkManager Service - -##### Overview -The NetworkManager service uses the ifup/ifdown logical interface definition to perform advanced network settings. Most of the parameters are set in the /etc/sysconfig/network and /etc/sysconfig/network-scripts/ifcfg- configuration files. The former is a global setting, and the latter is a setting of a specified NIC. When the two settings conflict, the latter takes effect. - -##### Configuration Differences -The configuration differences in /etc/sysconfig/network are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

IPv4

-

IPv6

-

Description

-

NA

-

IPV6FORWARDING=yes|no

-

IPv6 forwarding. By default, IPv6 packets are not forwarded.

-

NA

-

IPV6_AUTOCONF=yes|no

-

If IPv6 forwarding is enabled, the value is no. Otherwise, the value is yes.

-

NA

-

IPV6_ROUTER=yes|no

-

If IPv6 forwarding is enabled, the value is yes. Otherwise, the value is no.

-

NA

-

IPV6_AUTOTUNNEL=yes|no

-

Indicates the automatic tunnel mode. The default value is no.

-

GATEWAY

-

IPV6_DEFAULTGW=<IPv6 address[%interface]> (optional)

-

Indicates the default gateway in IPv6.

-

NA

-

IPV6_DEFAULTDEV=<interface> (optional)

-

Specifies the default forwarding NIC.

-

NA

-

IPV6_RADVD_PIDFILE=<pid-file> (optional)

-

The default path of ipv6_radvd_pid is /var/run/radvd/radvd.pid.

-

NA

-

IPV6_RADVD_TRIGGER_ACTION=startstop|reload|restart|SIGHUP (optional)

-

Default radvd trigger action.

-
- -The differences in /etc/sysconfig/network-scripts/ifcfg- are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

IPv4

-

IPv6

-

Description

-

IPADDRn

-

IPV6ADDR=<IPv6 address>[/<prefix length>]

-

Indicates the IP address.

-

PREFIXn

-

NA

-

Indicates the network prefix. It is invalid for network aliases and PPP. Its priority is higher than that of NETMASK.

-

NETMASKn

-

NA

-

Indicates the subnet mask. It is used only for the alias and PPP.

-

GATEWAY

-

IPV6_DEFAULTGW=<IPv6 address[%interface]> (optional)

-

Default gateway

-

MTU

-

IPV6_MTU=<MTU of link> (optional)

-

Default MTU

-

IPV4_FAILURE_FATAL=yes|no

-

IPV6_FAILURE_FATAL

-

The default value is no. If this parameter is set to yes, ifup-eth exits when dhclient fails.

-

NA

-

IPV6_PRIVACY=rfc3041

-

Disabled by default.

-

NA

-

IPV6INIT=yes|no

-

IPv6 is enabled by default.

-

NA

-

IPV6FORWARDING=yes|no

-

This function is disabled by default and is deprecated.

-
- -### FAQ - - - -#### The iscsi-initiator-utils Does Not Support the fe80 IPv6 Address. - -##### Symptom -When a client uses an IPv6 address to log in to the iSCSI server, run the iscsiadm -m node -p ipv6address -l command. If the global address is used, replace ipv6address in the command example with the global address. However, the link-local address \(IPv6 address starting with fe80\) cannot be used because the current mechanism of iscsi-initiator-utils does not support the link-local address to log in to the iSCSI server. - -##### Possible Cause -If you log in to the system using the iscsiadm -m node -p fe80::xxxx -l format, a login timeout error is returned. This is because you must specify an interface when using the link-local address. Otherwise, the iscsi\_io\_tcp\_connect function fails to invoke the connect function, and the standard error code 22 is generated. - -If you use the iscsiadm -m node -p fe80::xxxx%enp3s0 -l format for login, the iscsi\_addr\_match function will compare the address fe80::xxxx%enp3s0 with the address fe80::xxxx in the node information returned by the server. The comparison result does not match, causing the login failure. - -Therefore, **the current mechanism of iscsi-initiator-utils does not support login to the iSCSI server using a link-local address.** - -#### The IPv6 Address Is Lost After the NIC Is Down. - -##### Symptom -Run the ip link down+up NIC or ifconfig down+up NIC command to disable the NIC and then enable it to go online. Check the IP address configured on the NIC. It is found that the IPv4 address is not lost but the configured IPv6 address is lost. - -##### Possible Cause -According to the processing logic in the kernel, if the NIC is set to the down state, all IPv4 and IPv6 addresses will be cleared. After the NIC is set to the up state, the IPv4 address is automatically restored, and the automatically configured IPv6 link-local address on the NIC is also restored. 
However, other IPv6 addresses are lost by default. To retain these IPv6 addresses, run the **sysctl -w net.ipv6.conf.\< _NIC name_ \>.keep\_addr\_on\_down=1** command. - -#### Taking a Long Time to Add or Delete an IPv6 Address for a Bond Interface with Multiple IPv6 Addresses - -##### Symptom -When users run the following command to add or delete \(including flush\) an IPv6 address, the waiting time increases linearly along with the number of IPv6 addresses configured on a bond interface. **X** is the least significant 16 bits that dynamically change. For example, it takes about five minutes to add 3000 IPv6 address to or delete them from a bond interface that already has four physical NICs using a single thread, while for a common physical NIC, it takes less than 10 seconds. - -``` -ip a add/del 192:168::18:X/64 dev DEVICE -``` - -##### Possible Cause -When an IPv6 address is added to a bond interface, the IPv6 multicast address is generated and synchronized to all physical NICs. The time required increases with the number of IPv6 addresses. As a result, it takes a too long time. - -##### Solution -The IPv6 multicast address is generated by combining the least significant 24 bits of the IPv6 address and 33-33-ff. If there are too many multicast addresses, it takes a long time to add or delete the address. If there are a few multicast addresses, the time required is not affected. - -It is recommended that you set the least significant 24 bits of the IPv6 address to be the same as the most significant 24 bits of the IPv6 address. In this way, a single NIC can communicate with external devices using only one IP address in a network segment. - -#### Rsyslog Log Transmission Is Delayed in the Scenario Where Both IPv4 and IPv6 Are Used - -##### Symptom -When both IPv4 and IPv6 addresses are configured in the configuration file of the rsyslog client and the port configurations are the same, there is a possibility that log output is delayed when the server collects logs. 
- -##### Possible Cause -The delay is caused by the buffer queue mechanism of rsyslog. By default, rsyslog writes data to a file only when the number of buffer queues reaches a specified value. - -##### Solution -You can disable the buffer queue mechanism by configuring the Direct mode as the **root** user. Add the following information at the beginning of the new remote transmission configuration file in the /etc/rsyslog.d directory on the rsyslog remote transmission server: - -``` -$ActionQueueType Direct -$MainMsgQueueType Direct -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- In direct mode, the queue size is reduced by 1. Therefore, one log is reserved in the queue for the next log output. ->- The direct mode degrades the rsyslog performance of the server. diff --git a/docs/en/docs/Administration/configuring-the-repo-server.md b/docs/en/docs/Administration/configuring-the-repo-server.md deleted file mode 100644 index 35bde9dbab1874b010c6e5cde74c2840e6751829..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/configuring-the-repo-server.md +++ /dev/null @@ -1,407 +0,0 @@ -# Configuring the Repo Server - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> openEuler provides multiple repo sources for users online. For details about the repo sources, see [System Installation](./../Releasenotes/installing-the-os.html). If you cannot obtain the openEuler repo source online, you can use the ISO release package provided by openEuler to create a local openEuler repo source. This section uses the **openEuler-21.03-aarch64-dvd.iso** file as an example. Modify the ISO file as required. 
- - - -- [Configuring the Repo Server](#configuring-the-repo-server) - - [Overview](#overview) - - [Creating or Updating a Local Repo Source](#creating-or-updating-a-local-repo-source) - - [Obtaining the ISO File](#obtaining-the-iso-file) - - [Mounting an ISO File to Create a Repo Source](#mounting-an-iso-file-to-create-a-repo-source) - - [Creating a Local Repo Source](#creating-a-local-repo-source) - - [Updating the Repo Source](#updating-the-repo-source) - - [Deploying the Remote Repo Source](#deploying-the-remote-repo-source) - - [Installing and Configuring Nginx](#installing-and-configuring-nginx) - - [Starting Nginx](#starting-nginx) - - [Deploying the Repo Source](#deploying-the-repo-source) - - [Using the repo Source](#using-the-repo-source) - - [Configuring repo as the yum Source](#configuring-repo-as-the-yum-source) - - [repo Priority](#repo-priority) - - [Related Commands of dnf](#related-commands-of-dnf) - - -## Overview - -Create the **openEuler-21.03-aarch64-dvd.iso** file provided by openEuler as the repo source. The following uses Nginx as an example to describe how to deploy the repo source and provide the HTTP service. - -## Creating or Updating a Local Repo Source - -Mount the openEuler ISO file **openEuler-21.03-aarch64-dvd.iso** to create and update a repo source. - -### Obtaining the ISO File - -Obtain the openEuler ISO file from the following website: - -[https://repo.openeuler.org/openEuler-21.03/ISO/](https://repo.openeuler.org/openEuler-21.03/ISO/) - -### Mounting an ISO File to Create a Repo Source - -Run the mount command as the **root** user to mount the ISO file. - -The following is an example: - -``` -# mount /home/openEuler/openEuler-21.03-aarch64-dvd.iso /mnt/ -``` - -The mounted mnt directory is as follows: - -``` -. 
-│── boot.catalog -│── docs -│── EFI -│── images -│── Packages -│── repodata -│── TRANS.TBL -└── RPM-GPG-KEY-openEuler -``` - -In the preceding command, **Packages** indicates the directory where the RPM package is stored, **repodata** indicates the directory where the repo source metadata is stored, and **RPM-GPG-KEY-openEuler** indicates the public key for signing openEuler. - -### Creating a Local Repo Source - -You can copy related files in the ISO file to a local directory to create a local repo source. The following is an example: - -``` -# mount /home/openEuler/openEuler-21.03-aarch64-dvd.iso /mnt/ -$ mkdir -p ~/srv/repo/ -$ cp -r /mnt/Packages ~/srv/repo/ -$ cp -r /mnt/repodata ~/srv/repo/ -$ cp -r /mnt/RPM-GPG-KEY-openEuler ~/srv/repo/ -``` - -The local repo directory is as follows: - -``` -. -│── Packages -│── repodata -└── RPM-GPG-KEY-openEuler -``` - -**Packages** indicates the directory where the RPM package is stored, **repodata** indicates the directory where the repo source metadata is stored, and **RPM-GPG-KEY-openEuler** indicates the public key for signing openEuler. - -### Updating the Repo Source - -You can update the repo source in either of the following ways: - -- Use the ISO file of the new version to update the existing repo source. The method is the same as that for creating a repo source. That is, mount the ISO file or copy the ISO file to the local directory. - -- Add rpm packages to the Packages directory of the repo source and run the createrepo command to update the repo source. - - ``` - $ createrepo --update --workers=10 ~/srv/repo - ``` - -In this command, --update indicates the update, and --workers indicates the number of threads, which can be customized. - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -If the command output contains "createrepo: command not found", run the **dnf install createrepo** command as the **root** user to install the **createrepo** softeware. 
- -## Deploying the Remote Repo Source - -Install openEuler OS and deploy the repo source using Nginx on openEuler OS. - -### Installing and Configuring Nginx - -1. Download the Nginx tool and install it as the **root** user. - -2. After installing Nginx, configure /etc/nginx/nginx.conf as the **root** user. - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** -The configuration content in this document is for reference only. You can configure the content based on the site requirements (for example, security hardening requirements). - - ``` - user nginx; - worker_processes auto; # You are advised to set this parameter to core-1. - error_log /var/log/nginx/error.log warn; # log storage location - pid /var/run/nginx.pid; - - events { - worker_connections 1024; - } - - http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - sendfile on; - keepalive_timeout 65; - - server { - listen 80; - server_name localhost; #Server name (URL) - client_max_body_size 4G; - root /usr/share/nginx/repo; #Default service directory - - location / { - autoindex on; # Enable the access to lower-layer files in the directory. - autoindex_exact_size on; - autoindex_localtime on; - } - - } - - } - ``` - -### Starting Nginx - -1. Run the systemd command as the **root** user to start the Nginx service. - - ``` - # systemctl enable nginx - # systemctl start nginx - ``` - -2. You can run the following command to check whether the Nginx is started successfully: - - ``` - $ systemctl status nginx - ``` - - - [Figure 1](#en-us_topic_0151920971_fd25e3f1d664b4087ae26631719990a71) indicates that the Nginx service is started successfully. - - **Figure 1** The Nginx service is successfully started. 
-![](./figures/the-nginx-service-is-successfully-started.png "the-nginx-service-is-successfully-started") - - - If the Nginx service fails to be started, view the error information. - - ``` - $ systemctl status nginx.service --full - ``` - - **Figure 2** Nginx startup failure - ![](./figures/nginx-startup-failure.png "nginx-startup-failure") - - As shown in [Figure 2](#en-us_topic_0151920971_f1f9f3d086e454b9cba29a7cae96a4c54), the Nginx service fails to be created because the /var/spool/nginx/tmp/client\_body directory fails to be created. You need to manually create the directory as the **root** user. Similar problems are solved as follows: - - ``` - # mkdir -p /var/spool/nginx/tmp/client_body - # mkdir -p /var/spool/nginx/tmp/proxy - # mkdir -p /var/spool/nginx/tmp/fastcgi - # mkdir -p /usr/share/nginx/uwsgi_temp - # mkdir -p /usr/share/nginx/scgi_temp - ``` - -### Deploying the Repo Source - -1. Run the following command as the **root** user to create the /usr/share/nginx/repo directory specified in the Nginx configuration file /etc/nginx/nginx.conf: - - ``` - # mkdir -p /usr/share/nginx/repo - ``` - -2. Run the followding command as the **root** user to modify the /usr/share/nginx/repo directory permission: - - ``` - # chmod -R 755 /usr/share/nginx/repo - ``` - -3. Configure firewall rules as the **root** user to enable the port (port 80) configured for Nginx. - - ``` - # firewall-cmd --add-port=80/tcp --permanent - # firewall-cmd --reload - ``` - - Check whether port 80 is enabled as the **root** user. If the output is **yes**, port 80 is enabled. - - ``` - # firewall-cmd --query-port=80/tcp - ``` - - You can also enable port 80 using iptables as the **root** user. - - ``` - # iptables -I INPUT -p tcp --dport 80 -j ACCEPT - ``` - -4. After the Nginx service is configured, you can use the IP address to access the web page, as shown in [Figure 3](#en-us_topic_0151921017_fig1880404110396). 
- - **Figure 3** Nginx deployment succeeded -![](./figures/nginx-deployment-succeeded.png "nginx-deployment-succeeded") - -5. Use either of the following methods to add the repo source to the /usr/share/nginx/repo directory: - - - Copy related files in the image to the /usr/share/nginx/repo directory as the **root** user. - - ``` - # mount /home/openEuler/openEuler-21.03-aarch64-dvd.iso /mnt/ - # cp -r /mnt/Packages /usr/share/nginx/repo/ - # cp -r /mnt/repodata /usr/share/nginx/repo/ - # cp -r /mnt/RPM-GPG-KEY-openEuler /usr/share/nginx/repo/ - # chmod -R 755 /usr/share/nginx/repo - ``` - - The **openEuler-21.03-aarch64-dvd.iso** file is stored in the **/home/openEuler** directory. - - - Create a soft link for the repo source in the /usr/share/nginx/repo directory as the **root** user. - - ``` - # ln -s /mnt /usr/share/nginx/repo/os - ``` - - /mnt is the created repo source, and /usr/share/nginx/repo/os points to /mnt. - -## Using the repo Source - -The repo source can be configured as a yum source. Yellow dog Updater,Modified (yum for short) is a shell front-end software package manager. Based on the Redhat package manager (RPM), YUM can automatically download the rpm package from the specified server, install the package, and process dependent relationship. It supports one-off installation for all dependent software packages. - -### Configuring repo as the yum Source - -You can configure the built repo as the yum source and create the \*\*\*.repo configuration file (the extension .repo is mandatory) in the /etc/yum.repos.d/ directory as the **root** user. You can configure the yum source on the local host or HTTP server. - -- Configuring the local yum source. - - Create the **openEuler.repo** file in the **/etc/yum.repos.d** directory and use the local repository as the yum source. 
The content of the **openEuler.repo** file is as follows: - - ``` - [base] - name=base - baseurl=file:///home/openEuler/srv/repo - enabled=1 - gpgcheck=1 - gpgkey=file:///home/openEuler/srv/repo/RPM-GPG-KEY-openEuler - ``` - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - The repoid in \[repoid\] indicates the ID of the software repository. Repoids in all .repo configuration files must be unique. In the example, repoid is set to **base**. - > - **name** indicates the string that the software repository describes. - > - **baseurl** indicates the address of the software repository. - > - **enabled** indicates whether to enable the software source repository. The value can be **1** or **0**. The default value is **1**, indicating that the software source repository is enabled. - > - **gpgcheck** indicates whether to enable the GNU privacy guard (GPG) to check the validity and security of sources of RPM packages. **1** indicates GPG check is enabled. **0** indicates the GPG check is disabled. If this option is not specified, the GPG check is enabled by default. - > - **gpgkey** is the public key used to verify the signature. - -- Configuring the yum source for the HTTP server - - Create the **openEuler.repo** file in the **/etc/yum.repos.d** directory. - - - If the repo source of the HTTP server deployed by the user is used as the yum source, the content of **openEuler.repo** is as follows: - - ``` - [base] - name=base - baseurl=http://192.168.139.209/ - enabled=1 - gpgcheck=1 - gpgkey=http://192.168.139.209/RPM-GPG-KEY-openEuler - ``` - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > 192.168.139.209 is an example. Replace it with the actual IP address. 
- - - If the openEuler repo source provided by openEuler is used as the yum source, the content of **openEuler.repo** is as follows (the aarch64-based OS repo source is used as an example): - - ``` - [base] - name=base - baseurl=http://repo.openeuler.org/openEuler-21.03/OS/aarch64/ - enabled=1 - gpgcheck=1 - gpgkey=http://repo.openeuler.org/openEuler-21.03/OS/aarch64/RPM-GPG-KEY-openEuler - ``` - -### repo Priority - -If there are multiple repo sources, you can set the repo priority in the .repo file. If the priority is not set, the default priority is 99. If the same RPM package exists in the sources with the same priority, the latest version is installed. **1** indicates the highest priority and **99** indicates the lowest priority. For example, set the priority of **openEuler.repo** to **2**. - -``` -[base] -name=base -baseurl=http://192.168.139.209/ -enabled=1 -priority=2 -gpgcheck=1 -gpgkey=http://192.168.139.209/RPM-GPG-KEY-openEuler -``` - -### Related Commands of dnf - -The dnf command can automatically parse the dependency between packages during installation and upgrade. The common usage method is as follows: - -``` -dnf -``` - -Common commands are as follows: - -- Installation - - Run the following command as the **root** user. - - ``` - # dnf install - ``` - -- Upgrade - - Run the following command as the **root** user. - - ``` - # dnf update - ``` - -- Rollback - - Run the following command as the **root** user. - - ``` - # dnf downgrade - ``` - -- Checking for update - - ``` - $ dnf check-update - ``` - -- Uninstallation - - Run the following command as the **root** user. - - ``` - # dnf remove - ``` - -- Query - - ``` - $ dnf search - ``` - -- Local installation - - Run the following command as the **root** user. 
- - ``` - # dnf localinstall - ``` - -- Viewing historical records - - ``` - $ dnf history - ``` - -- Clearing cache records - - ``` - $ dnf clean all - ``` - -- Updating cache - - ``` - $ dnf makecache - ``` \ No newline at end of file diff --git a/docs/en/docs/Administration/configuring-the-web-server.md b/docs/en/docs/Administration/configuring-the-web-server.md deleted file mode 100644 index c915961e7b8231156bd3d35f4d58db3afc153345..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/configuring-the-web-server.md +++ /dev/null @@ -1,545 +0,0 @@ -# Configuring the Web Server - - -- [Configuring the Web Server](#configuring-the-web-server) - - [Apache Server](#apache-server) - - [Overview](#overview) - - [Managing httpd](#managing-httpd) - - [Configuration File Description](#configuration-file-description) - - [Management Module and SSL](#management-module-and-ssl) - - [Verifying Whether the Web Service Is Successfully Set Up](#verifying-whether-the-web-service-is-successfully-set-up) - - [Nginx Server](#nginx-server) - - [Overview](#overview-1) - - [Installing Nginx](#installing-nginx) - - [Managing Nginx](#managing-nginx) - - [Configuration File Description](#configuration-file-description-1) - - [Management Modules](#management-modules) - - [Verifying Whether the Web Service Is Successfully Set Up](#verifying-whether-the-web-service-is-successfully-set-up-1) - - - -## Apache Server - - - - -### Overview - -World Wide Web \(Web\) is one of the most commonly used Internet protocols. At present, the web server in the Unix-Like system is mainly implemented through the Apache server software. To operate dynamic websites, LAMP \(Linux + Apache + MySQL + PHP\) is developed. Web services can be combined with multimedia such as text, graphics, images, and audio, and support information transmission through hyperlinks. 
- -The web server version in the openEuler system is Apache HTTP server 2.4, that is, httpd, which is an open-source web server developed by the Apache Software Foundation. - -### Managing httpd - -#### Overview -You can use the systemctl tool to manage the httpd service, including starting, stopping, and restarting the service, and viewing the service status. This section describes how to manage the Apache HTTP service. - -#### Prerequisites -- To use the Apache HTTP service, ensure that the rpm package of the httpd service has been installed in your system. Run the following command as the **root** user to install the rpm package: - - ``` - # dnf install httpd - ``` - - For more information about service management, see [Service Management](./service-management.html). - -- To start, stop, and restart the httpd service, you must have the root permission. - -#### Starting a Service -- Run the following command to start and run the httpd service: - - ``` - # systemctl start httpd - ``` - - -- If you want the httpd service to automatically start when the system starts, the command and output are as follows: - - ``` - # systemctl enable httpd - Created symlink /etc/systemd/system/multi-user.target.wants/httpd.service → /usr/lib/systemd/system/httpd.service. - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the running Apache HTTP server functions as a secure server, a password is required after the system is started. The password is an encrypted private SSL key. - -#### Stopping the Service -- Run the following command to stop the httpd service: - - ``` - # systemctl stop httpd - ``` - -- If you want to prevent the service from automatically starting during system startup, the command and output are as follows: - - ``` - # systemctl disable httpd - Removed /etc/systemd/system/multi-user.target.wants/httpd.service. 
- ``` - - -#### Restarting a Service -You can restart the service in any of the following ways: - -- Restart the service by running the restart command: - - ``` - # systemctl restart httpd - ``` - - This command stops the ongoing httpd service and restarts it immediately. This command is generally used after a service is installed or when a dynamically loaded module \(such as PHP\) is removed. - -- Reload the configuration. - - ``` - # systemctl reload httpd - ``` - - This command causes the running httpd service to reload its configuration file. Any requests that are currently being processed will be interrupted, causing the client browser to display an error message or re-render some pages. - -- Re-load the configuration without affecting the activation request. - - ``` - # apachectl graceful - ``` - - This command causes the running httpd service to reload its configuration file. Any requests that are currently being processed will continue to use the old configuration file. - - -#### Verifying the Service Status -Check whether the httpd service is running. - -``` -$ systemctl is-active httpd -``` - -If active is displayed in the command output, the service is running. - -### Configuration File Description - -After the httpd service is started, it reads the configuration file shown in [Table 1](#table24341012096) by default. - -**Table 1** Configuration file description - - - - - - - - - - - - - -

File

-

Description

-

/etc/httpd/conf/httpd.conf

-

Main configuration file.

-

/etc/httpd/conf.d

-

Directory of auxiliary configuration files, which are included by the main configuration file.

-

Configuration files placed in this directory are automatically included by the main configuration file.

-
- -Although the default configuration can be used in most cases, you need to be familiar with some important configuration items. After the configuration file is modified, run the following command as the **root** user to check the syntax errors that may occur in the configuration file: - -``` -# apachectl configtest -``` - -If the following information is displayed, the syntax of the configuration file is correct: - -``` -Syntax OK -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- Before modifying the configuration file, back up the original file so that the configuration file can be quickly restored if a fault occurs. ->- The modified configuration file takes effect only after the web service is restarted. - -### Management Module and SSL - -#### Overview -The httpd service is a modular application that is distributed with many Dynamic Shared Objects \(DSOs\). DSOs can be dynamically loaded or unloaded when running if necessary. These modules are located in the /usr/lib64/httpd/modules/ directory of the server operating system. This section describes how to load and write a module. - -#### Loading a Module -To load a special DSO module, you can use the load module indication in the configuration file. The modules provided by the independent software package have their own configuration files in the /etc/httpd/conf.modules.d directory. - -For example, to load the asis DSO module, perform the following steps: - -1. In the /etc/httpd/conf.modules.d/00-optional.conf file, uncomment the following configuration line as the **root** user: - - ``` - LoadModule asis_module modules/mod_asis.so - ``` - -2. After the loading is complete, restart the httpd service as the **root** user to reload the configuration file. - - ``` - # systemctl restart httpd - ``` - -3. After the loading is complete, run the httpd -M command as the **root** user to check whether the asis DSO module is loaded. 
- - ``` - # httpd -M | grep asis - ``` - - If the following information is displayed, the asis DSO module is successfully loaded: - - ``` - asis_module (shared) - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->**Common httpd commands** ->- httpd -v: views the httpd version number. ->- httpd -l: views the static modules compiled into the httpd program. ->- httpd -M: views the static modules and loaded dynamic modules that have been compiled into the httpd program. - -#### Introduction to SSL -Secure Sockets Layer \(SSL\) is an encryption protocol that allows secure communication between the server and client. The Transport Layer Security \(TLS\) protocol ensures security and data integrity for network communication. openEuler supports Mozilla Network Security Services \(NSS\) as the security protocol TLS. To load the SSL, perform the following steps: - -1. Install the **mod\_ssl** RPM package as the **root** user. - - ``` - # dnf install mod_ssl - ``` - -2. After the loading is complete, restart the httpd service as the **root** user to reload the configuration file. - - ``` - # systemctl restart httpd - ``` - -3. After the loading is complete, run the **httpd -M** command as the **root** user to check whether the SSL is loaded. - - ``` - # httpd -M | grep ssl - ``` - - If the following information is displayed, the SSL has been loaded successfully. - - ``` - ssl_module (shared) - ``` - - -### Verifying Whether the Web Service Is Successfully Set Up - -After the web server is set up, perform the following operations to check whether the web server is set up successfully: - -1. Run the following command as the **root** user to check the IP address of the server: - - ``` - # ifconfig - ``` - - If the following information is displayed, the IP address of the server is 192.168.1.60. 
- - ``` - enp3s0: flags=4163 mtu 1500 - inet 192.168.1.60 netmask 255.255.255.0 broadcast 192.168.1.255 - inet6 fe80::5054:ff:fe95:499f prefixlen 64 scopeid 0x20 - ether 52:54:00:95:49:9f txqueuelen 1000 (Ethernet) - RX packets 150713207 bytes 49333673733 (45.9 GiB) - RX errors 0 dropped 43 overruns 0 frame 0 - TX packets 2246438 bytes 203186675 (193.7 MiB) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - - enp4s0: flags=4163 mtu 1500 - ether 52:54:00:7d:80:9e txqueuelen 1000 (Ethernet) - RX packets 149937274 bytes 44652889185 (41.5 GiB) - RX errors 0 dropped 1102561 overruns 0 frame 0 - TX packets 0 bytes 0 (0.0 B) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - - lo: flags=73 mtu 65536 - inet 127.0.0.1 netmask 255.0.0.0 - inet6 ::1 prefixlen 128 scopeid 0x10 - loop txqueuelen 1000 (Local Loopback) - RX packets 37096 bytes 3447369 (3.2 MiB) - RX errors 0 dropped 0 overruns 0 frame 0 - TX packets 37096 bytes 3447369 (3.2 MiB) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - ``` - -2. Configure the firewall as the **root** user. - - ``` - # firewall-cmd --add-service=http --permanent - success - # firewall-cmd --reload - success - ``` - -3. Verify whether the web server is successfully set up. You can select the Linux or Windows operating system for verification. - - Using the Linux OS - - Run the following command to check whether the web page can be accessed. If the service is successfully set up, the web page can be accessed. - - ``` - $ curl http://192.168.1.60 - ``` - - Run the following command to check whether the command output is 0. If the command output is 0, the httpd server is successfully set up. - - ``` - $ echo $? - ``` - - - Using the Windows OS - - Open the browser and enter the following address in the address box. If the web page can be accessed, the httpd server is successfully set up. 
- - http://192.168.1.60 - - If the port number is changed, enter the address in the following format: - - http://192.168.1.60: port number - - - -## Nginx Server - - - -### Overview - -Nginx is a lightweight web server which also acts as a reverse proxy server and email \(IMAP/POP3\) proxy server. It features low memory usage and strong concurrency capability. Nginx supports FastCGI, SSL, virtual hosts, URL rewrite, Gzip, and extension of many third-party modules. - -### Installing Nginx - -1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Install the Nginx server as the **root** user. - - ``` - # dnf install nginx - ``` - -5. Check the installed RPM package. - - ``` - $ dnf list all | grep nginx - ``` - - -### Managing Nginx - -#### Overview -You can use the systemctl tool to manage the Nginx service, including starting, stopping, and restarting the service, and viewing the service status. This section describes how to manage the Nginx service. - -#### Prerequisites -- Ensure that the Nginx service has been installed. If not, install it by referring to [Installing Nginx](#installing-nginx). - - For more information about service management, see [Service Management](./service-management.html). - -- To start, stop, and restart the Nginx service, you must have the **root** permission. - -#### Starting a Service -- Run the following command to start and run the Nginx service: - - ``` - # systemctl start nginx - ``` - - -- If you want the Nginx service to automatically start when the system starts, the command and output are as follows: - - ``` - # systemctl enable nginx - Created symlink /etc/systemd/system/multi-user.target.wants/nginx.service → /usr/lib/systemd/system/nginx.service. 
- ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the running Nginx server functions as a secure server, a password is required after the system is started. The password is an encrypted private SSL key. - -#### Stopping the Service -- Run the following command to stop the Nginx service: - - ``` - # systemctl stop nginx - ``` - -- If you want to prevent the service from automatically starting during system startup, the command and output are as follows: - - ``` - # systemctl disable nginx - Removed /etc/systemd/system/multi-user.target.wants/nginx.service. - ``` - - -#### Restarting a Service -You can restart the service in any of the following ways: - -- Restart the service. - - ``` - # systemctl restart nginx - ``` - - This command stops the ongoing Nginx service and restarts it immediately. This command is generally used after a service is installed or when a dynamically loaded module \(such as PHP\) is removed. - -- Reload the configuration. - - ``` - # systemctl reload nginx - ``` - - This command causes the running Nginx service to reload its configuration file. Any requests that are currently being processed will be interrupted, causing the client browser to display an error message or re-render some pages. - -- Smoothly restart Nginx. - - ``` - # kill -HUP PID - ``` - - This command causes the running Nginx service to reload its configuration file. Any requests that are currently being processed will continue to use the old configuration file. - - -#### Verifying the Service Status -Check whether the Nginx service is running. - -``` -$ systemctl is-active nginx -``` - -If **active** is displayed in the command output, the service is running. - -### Configuration File Description - -After the Nginx service is started, it reads the configuration file shown in [Table 2](#table24341012096) by default. - -**Table 2** Configuration file description - - - - - - - - - - - - - -

File

-

Description

-

/etc/nginx/nginx.conf

-

Main configuration file.

-

/etc/nginx/conf.d

-

Directory of auxiliary configuration files, which are included by the main configuration file.

-

Configuration files placed in this directory are automatically included by the main configuration file.

-
- -Although the default configuration can be used in most cases, you need to be familiar with some important configuration items. After the configuration file is modified, run the following command as the **root** user to check the syntax errors that may occur in the configuration file: - -``` -# nginx -t -``` - -If the command output contains **syntax is ok**, the syntax of the configuration file is correct. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- Before modifying the configuration file, back up the original file so that the configuration file can be quickly restored if a fault occurs. ->- The modified configuration file takes effect only after the web service is restarted. - -### Management Modules - -#### Overview -The Nginx service is a modular application that is distributed with many Dynamic Shared Objects \(DSOs\). DSOs can be dynamically loaded or unloaded when running if necessary. These modules are located in the **/usr/lib64/nginx/modules/** directory of the server operating system. This section describes how to load and write a module. - -#### Loading a Module -To load a special DSO module, you can use the load module indication in the configuration file. Generally, the modules provided by independent software packages have their own configuration files in the **/usr/share/nginx/modules** directory. - -The DSO is automatically loaded when the **dnf install nginx** command is used to install the Nginx in the openEuler operating system. - -### Verifying Whether the Web Service Is Successfully Set Up - -After the web server is set up, perform the following operations to check whether the web server is set up successfully: - -1. Run the following command as the **root** user to check the IP address of the server: - - ``` - # ifconfig - ``` - - If the following information is displayed, the IP address of the server is **192.168.1.60**. 
- - ``` - enp3s0: flags=4163 mtu 1500 - inet 192.168.1.60 netmask 255.255.255.0 broadcast 192.168.1.255 - inet6 fe80::5054:ff:fe95:499f prefixlen 64 scopeid 0x20 - ether 52:54:00:95:49:9f txqueuelen 1000 (Ethernet) - RX packets 150713207 bytes 49333673733 (45.9 GiB) - RX errors 0 dropped 43 overruns 0 frame 0 - TX packets 2246438 bytes 203186675 (193.7 MiB) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - - enp4s0: flags=4163 mtu 1500 - ether 52:54:00:7d:80:9e txqueuelen 1000 (Ethernet) - RX packets 149937274 bytes 44652889185 (41.5 GiB) - RX errors 0 dropped 1102561 overruns 0 frame 0 - TX packets 0 bytes 0 (0.0 B) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - - lo: flags=73 mtu 65536 - inet 127.0.0.1 netmask 255.0.0.0 - inet6 ::1 prefixlen 128 scopeid 0x10 - loop txqueuelen 1000 (Local Loopback) - RX packets 37096 bytes 3447369 (3.2 MiB) - RX errors 0 dropped 0 overruns 0 frame 0 - TX packets 37096 bytes 3447369 (3.2 MiB) - TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 - ``` - -2. Configure the firewall as the **root** user. - - ``` - # firewall-cmd --add-service=http --permanent - success - # firewall-cmd --reload - success - ``` - -3. Verify whether the web server is successfully set up. You can select the Linux or Windows operating system for verification. - - Using the Linux OS - - Run the following command to check whether the web page can be accessed. If the service is successfully set up, the web page can be accessed. - - ``` - $ curl http://192.168.1.60 - ``` - - Run the following command to check whether the command output is **0**. If the command output is **0**, the Nginx server is successfully set up. - - ``` - $ echo $? - ``` - - - Using the Windows OS - - Open the browser and enter the following address in the address box. If the web page can be accessed, the Nginx server is successfully set up. 
- - http://192.168.1.60 - - If the port number is changed, enter the address in the following format: - - http://192.168.1.60: port number diff --git a/docs/en/docs/Administration/faqs.md b/docs/en/docs/Administration/faqs.md deleted file mode 100644 index 5fc145d13b938d547d649182d524a22e2eb9c73d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/faqs.md +++ /dev/null @@ -1,164 +0,0 @@ -# FAQs - - -- [FAQs](#faqs) - - [Why Is the Memory Usage of the libvirtd Service Queried by Running the systemctl and top Commands Different?](#why-is-the-memory-usage-of-the-libvirtd-service-queried-by-running-the-systemctl-and-top-commands-different) - - [An Error Occurs When stripsize Is Set to 4 During RAID 0 Volume Configuration](#an-error-occurs-when-stripsize-is-set-to-4-during-raid-0-volume-configuration) - - [Failed to Compile MariaDB Using rpmbuild](#failed-to-compile-mariadb-using-rpmbuild) - - [Failed to Start the SNTP Service Using the Default Configuration](#failed-to-start-the-sntp-service-using-the-default-configuration) - - [Installation Failure Caused by Software Package Conflict, File Conflict, or Missing Software Package](#installation-failure-caused-by-software-package-conflict-file-conflict-or-missing-software-package) - - -## Why Is the Memory Usage of the libvirtd Service Queried by Running the systemctl and top Commands Different? - -### Symptom -The output of the **systemctl** and **systemd-cgtop** commands shows that the libvirtd service occupies more than 1.5 GB memory, but the output of the **top** command shows that the libvirtd service occupies about 70 MB memory. - -### Possible Cause -The memory displayed in the services \(including systemctl and systemd-cgtop\) managed by systemd can be obtained from **memory.usage\_in\_bytes** in Cgroup. Running the **top** command is to query the memory information in the **/proc** directory. The query results are different because the statistical method varies. 
- -Generally, the memory used by service processes has the following types: - -- anon\_rss: anonymous pages in user mode address spaces, for example, memory allocated by calling the malloc function or the mmap function with configured **MAP\_ANONYMOUS**. When the system memory is insufficient, this type of memory can be swapped by the kernel. -- file\_rss: mapped pages in user mode address spaces, including map file \(such as mmap of a specified file\) and map tmpfs \(such as IPC shared memory\). When the system memory is insufficient, the kernel can reclaim these pages. Data may need to be synchronized between the kernel and map file before reclamation. -- file\_cache: file cache \(page in page cache of disk file\), which is generated when a file is read or written. When the system memory is insufficient, the kernel can reclaim these pages. Data may need to be synchronized between the kernel and map file before reclamation. -- buffer pages: belongs to page cache, for example, cache generated when block device files are read. - -anon\_rss and file\_rss belong to the resident set size \(RSS\) of processes, and file\_cache and buffer pages belong to page cache. In brief: - -RSS in the output of the **top** command = anon\_rss + file\_rss; Shared memory \(SHR\) = file\_rss - -**memory.usage\_in\_bytes** in Cgroup = cache + RSS + swap - -In conclusion, the definition of memory usage obtained by running the **systemd** command is different from that obtained by running the **top** command. Therefore, the query results are different. - -## An Error Occurs When stripsize Is Set to 4 During RAID 0 Volume Configuration - -### Symptom -An error occurs when the **stripsize** parameter is set to **4** during RAID 0 volume configuration. - -### Possible Cause -The 64 KB page table can be enabled only in the scenario where **stripsize** is set to **64**. - -### Solution -You do not need to modify the configuration file. 
When running the **lvcreate** command on openEuler, set **stripesize** to **64** because the minimum supported stripe size is 64 KB. - -## Failed to Compile MariaDB Using rpmbuild - -### Symptom -When you log in to the system as user **root** and run the **rpmbuild** command to compile the MariaDB source code, the compilation fails and the following information is displayed: - -``` -+ echo 'mysql can'\''t run test as root' -mysql can't run test as root -+ exit 1 -``` - -### Possible Cause -The MariaDB does not allow user **root** to execute test cases. However, test cases are automatically executed during compilation. As a result, the compilation process is blocked. - -### Solution -Use a text editor, such as vi, to modify the value of the **runtest** variable in the **mariadb.spec** file. - -Before the modification: - -``` -%global runtest 1 -``` - -After the modification: - -``` -%global runtest 0 -``` - -The modification disables the function of executing test cases during compilation, which does not affect the compilation and the RPM package content after compilation. - -## Failed to Start the SNTP Service Using the Default Configuration - -### Symptom -The SNTP service fails to be started with the default configuration. - -### Possible Cause -The domain name of the NTP server is not added to the default configuration. - -### Solution -Modify the **/etc/sysconfig/sntp** file and add the domain name of the NTP server in China: **0.generic.pool.ntp.org**. - -## Installation Failure Caused by Software Package Conflict, File Conflict, or Missing Software Package - -### Symptom -Software package conflict, file conflict, or missing software packages may occur during software package installation. As a result, the upgrade is interrupted and the installation fails. 
The error information about software package conflict, file conflict, and missing software packages is as follows: - -The following is an example of software package conflict error information \(the conflict between **libev-libevent-devel-4.24-11.oe1.aarch64** and **libevent-devel-2.1.11-2.oe1.aarch64** is used as an example\): - -``` -package libev-libevent-devel-4.24-11.oe1.aarch64 conflicts with libevent-devel provided by libevent-devel-2.1.11-2.oe1.aarch64 - - cannot install the best candidate for the job - - conflicting requests -``` - -The following is an example of file conflict error information \(the **/usr/bin/containerd** file conflict is used as an example\): - -``` -Error: Transaction test error: - file /usr/bin/containerd from install of containerd-1.2.0-101.oe1.aarch64 conflicts with file from package docker-engine-18.09.0-100.aarch64 - file /usr/bin/containerd-shim from install of containerd-1.2.0-101.oe1.aarch64 conflicts with file from package docker-engine-18.09.0-100.aarch64 -``` - -The following is an example of the error message indicating that the **blivet-data** software package is missing: - -``` -Error: - Problem: cannot install both blivet-data-1:3.1.1-6.oe1.noarch and blivet-data-1:3.1.1-5.noarch - - package python2-blivet-1:3.1.1-5.noarch requires blivet-data = 1:3.1.1-5, but none of the providers can be installed - - cannot install the best update candidate for package blivet-data-1:3.1.1-5.noarch - - problem with installed package python2-blivet-1:3.1.1-5.noarch(try to add '--allowerasing' to command line to replace conflicting packages or '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages) -``` - -### Possible Cause -- In the software packages provided by openEuler, some software packages have different names but the same functions. As a result, the software packages cannot be installed at the same time. 
-- In the software packages provided by openEuler, some software packages have different names but the same functions. As a result, the files after installation are the same, causing file conflict. -- Some software packages are depended on by other software packages before the upgrade. After the software packages are upgraded, the software packages that depend on them may fail to be installed due to lack of software packages. - -### Solution -If a software package conflict occurs, perform the following steps \(the software package conflict in "Symptom" is used as an example\): - -1. According to the error message displayed during the installation, the software package that conflicts with the to-be-installed software package **libev-libevent-devel-4.24-11.oe1.aarch64** is **libevent-devel-2.1.11-2.oe1.aarch64**. -2. Run the **dnf remove** command to uninstall the software package that conflicts with the software package to be installed. - - ``` - # dnf remove libevent-devel-2.1.11-2.oe1.aarch64 - ``` - -3. Perform the installation again. - -If a file conflict occurs, perform the following steps \(the file conflict in "Symptom" is used as an example\): - -1. According to the error message displayed during the installation, the names of the software packages that cause the file conflict are **containerd-1.2.0-101.oe1.aarch64** and **docker-engine-18.09.0-100.aarch64**. -2. Record the names of the software packages that do not need to be installed. The following uses **docker-engine-18.09.0-100.aarch64** as an example. -3. Run the **dnf remove** command to uninstall the software package that does not need to be installed. - - ``` - # dnf remove docker-engine-18.09.0-100.aarch64 - ``` - -4. Perform the installation again. - -If a software package is missing, perform the following steps \(the missed software package in "Symptom" is used as an example\): - -1. 
Determine the name of the software package to be upgraded \(**blivet-data-1:3.1.1-5.noarch**\) and the name of the dependent software package \(**python2-blivet-1:3.1.1-5.noarch**\) based on the error information displayed during the upgrade. -2. Run the **dnf remove** command to uninstall the software package that depends on the upgrade package or add the **\-\-allowerasing** parameter when upgrading the software package. - - Run the **dnf remove** command to uninstall the software package that depends on the **blivet-data-1:3.1.1-5.noarch** software package. - - ``` - # dnf remove python2-blivet-1:3.1.1-5.noarch - ``` - - - Add the **\-\-allowerasing** parameter when upgrading the software package. - - ``` - # yum update blivet-data-1:3.1.1-5.noarch -y --allowerasing - ``` - -3. Perform the upgrade again. diff --git a/docs/en/docs/Administration/figures/creat_datadisk.png b/docs/en/docs/Administration/figures/creat_datadisk.png deleted file mode 100644 index 0dfd6a2802184af6d809c485191ea52452cf28d5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/creat_datadisk.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/creat_datadisk1.png b/docs/en/docs/Administration/figures/creat_datadisk1.png deleted file mode 100644 index 0dfd6a2802184af6d809c485191ea52452cf28d5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/creat_datadisk1.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e-1.png b/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e-1.png deleted file mode 100644 index 900cdc07c1f0e844bc48fe2342e83c91a23c24ec..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e-1.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e.png 
b/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e.png deleted file mode 100644 index 900cdc07c1f0e844bc48fe2342e83c91a23c24ec..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/d1376b2a-d036-41c4-b852-e8368f363b5e.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0229622729.png b/docs/en/docs/Administration/figures/en-us_image_0229622729.png deleted file mode 100644 index 47f2d1cac133379469ed88b2bcb7213d75cf881e..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0229622729.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0229622789.png b/docs/en/docs/Administration/figures/en-us_image_0229622789.png deleted file mode 100644 index 102d523ea5c2a1fedf4975556bf8b26f7599daaf..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0229622789.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0230050789.png b/docs/en/docs/Administration/figures/en-us_image_0230050789.png deleted file mode 100644 index 0b785be2a026fe059c6ee41700a971a11cfff7ae..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0230050789.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143189.png b/docs/en/docs/Administration/figures/en-us_image_0231143189.png deleted file mode 100644 index 7656f3aa5f5907f1e9f981c0cb5d44d4fcb84ef3..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143189.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143191.png b/docs/en/docs/Administration/figures/en-us_image_0231143191.png deleted file mode 100644 index a82d1bcb2b719e3a372f63ae099cb5d52a93b536..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/Administration/figures/en-us_image_0231143191.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143193.png b/docs/en/docs/Administration/figures/en-us_image_0231143193.png deleted file mode 100644 index 94614045bddb0871b44d2f6603402f914871ad61..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143193.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143195.png b/docs/en/docs/Administration/figures/en-us_image_0231143195.png deleted file mode 100644 index 05011dbabe2d245c37ec68de646851bf955a2361..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143195.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143196.png b/docs/en/docs/Administration/figures/en-us_image_0231143196.png deleted file mode 100644 index 9bdbac969920af77721980804bd1c5433bea5bc9..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143196.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143197.png b/docs/en/docs/Administration/figures/en-us_image_0231143197.png deleted file mode 100644 index 5ea4eec4002374096d8ac18eb973ed3bf874b632..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143197.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231143198.png b/docs/en/docs/Administration/figures/en-us_image_0231143198.png deleted file mode 100644 index 7d6360c150495d204da4b069e6dc62677580888f..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231143198.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231563132.png b/docs/en/docs/Administration/figures/en-us_image_0231563132.png 
deleted file mode 100644 index bb801a9471f3f3541ba96491654f25e2df9ce8bf..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231563132.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231563134.png b/docs/en/docs/Administration/figures/en-us_image_0231563134.png deleted file mode 100644 index 398d15376d29d3aa406abb2e7e065d4625428c4d..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231563134.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231563135.png b/docs/en/docs/Administration/figures/en-us_image_0231563135.png deleted file mode 100644 index 785977142a6bf0e1c1815b82dea73d75fa206a75..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231563135.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/en-us_image_0231563136.png b/docs/en/docs/Administration/figures/en-us_image_0231563136.png deleted file mode 100644 index c274db4d0ca9d8758267a916e19fdef4aa22d0ba..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/en-us_image_0231563136.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/example-command-output.png b/docs/en/docs/Administration/figures/example-command-output.png deleted file mode 100644 index 2d77d3dc2934763b5da896a827b9805da34d1c09..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/example-command-output.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/ima_digest_list_update.png b/docs/en/docs/Administration/figures/ima_digest_list_update.png deleted file mode 100644 index 771067e31cee84591fbb914d7be4e8c576d7f5d2..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/ima_digest_list_update.png and /dev/null differ diff --git 
a/docs/en/docs/Administration/figures/ima_performance.png b/docs/en/docs/Administration/figures/ima_performance.png deleted file mode 100644 index f5d641e8682ad2b9c0fbfad191add1819f5b2eef..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/ima_performance.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/ima_verification.png b/docs/en/docs/Administration/figures/ima_verification.png deleted file mode 100644 index fc879949db5387c61ccf6176f948b9a00f4fb053..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/ima_verification.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/login.png b/docs/en/docs/Administration/figures/login.png deleted file mode 100644 index d15c2cad98fba16320d587f3c7b0c80f435c5d3a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/login.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/mariadb-logical-architecture.png b/docs/en/docs/Administration/figures/mariadb-logical-architecture.png deleted file mode 100644 index c4e65e786d918c84bbb14c101b69bc4ad36ccb4b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/mariadb-logical-architecture.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/nginx-deployment-succeeded.png b/docs/en/docs/Administration/figures/nginx-deployment-succeeded.png deleted file mode 100644 index 9ffb2c142defbd690e5407659116bf8e5582ba73..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/nginx-deployment-succeeded.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/nginx-startup-failure.png b/docs/en/docs/Administration/figures/nginx-startup-failure.png deleted file mode 100644 index c8b855453433796265de42d7ffd0189c7ff9be2b..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/Administration/figures/nginx-startup-failure.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/postgres.png b/docs/en/docs/Administration/figures/postgres.png deleted file mode 100644 index e7fc36882718587ec949133fe9892185cb4c2158..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/postgres.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/postgresql-architecture.png b/docs/en/docs/Administration/figures/postgresql-architecture.png deleted file mode 100644 index cc73eb31f746409efc1e997072bf3d18b013012e..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/postgresql-architecture.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/the-nginx-service-is-successfully-started.png b/docs/en/docs/Administration/figures/the-nginx-service-is-successfully-started.png deleted file mode 100644 index bc6929772fd98fac3494b4436f26910b09818cb7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/the-nginx-service-is-successfully-started.png and /dev/null differ diff --git a/docs/en/docs/Administration/figures/trusted_chain.png b/docs/en/docs/Administration/figures/trusted_chain.png deleted file mode 100644 index 034f0f092f41fb500ee4122339c447d10d4138ec..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/figures/trusted_chain.png and /dev/null differ diff --git a/docs/en/docs/Administration/managing-hard-disks-through-lvm.md b/docs/en/docs/Administration/managing-hard-disks-through-lvm.md deleted file mode 100644 index 10e5910556072e84aefd1d61cecf406d0f0d2fb0..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/managing-hard-disks-through-lvm.md +++ /dev/null @@ -1,556 +0,0 @@ -# Managing Hard Disks Through LVM - - -- [Managing Hard Disks Through LVM](#managing-hard-disks-through-lvm) - - [LVM Overview](#lvm-overview) - - 
[Basic Terms](#basic-terms) - - [Installing the LVM](#installing-the-lvm) - - [Managing PVs](#managing-pvs) - - [Creating a PV](#creating-a-pv) - - [Viewing a PV](#viewing-a-pv) - - [Modifying PV Attributes](#modifying-pv-attributes) - - [Deleting a PV](#deleting-a-pv) - - [Managing VGs](#managing-vgs) - - [Creating a VG](#creating-a-vg) - - [Viewing a VG](#viewing-a-vg) - - [Modifying VG Attributes](#modifying-vg-attributes) - - [Extending a VG](#extending-a-vg) - - [Shrinking a VG](#shrinking-a-vg) - - [Deleting a VG](#deleting-a-vg) - - [Managing LVs](#managing-lvs) - - [Creating an LV](#creating-an-lv) - - [Viewing an LV](#viewing-an-lv) - - [Adjusting the LV Size](#adjusting-the-lv-size) - - [Extending an LV](#extending-an-lv) - - [Shrinking an LV](#shrinking-an-lv) - - [Deleting an LV](#deleting-an-lv) - - [Creating and Mounting a File System](#creating-and-mounting-a-file-system) - - [Creating a File System](#creating-a-file-system) - - [Manually Mounting a File System](#manually-mounting-a-file-system) - - [Automatically Mounting a File System](#automatically-mounting-a-file-system) - - - - -## LVM Overview - -Logical Volume Manager \(LVM\) is a mechanism used for managing disk partitions in Linux. By adding a logical layer between disks and file systems, LVM shields the disk partition layout for file systems, thereby improving flexibility in managing disk partitions. - -The procedure of managing a disk through LVM is as follows: - -1. Create physical volumes for a disk. -2. Combine several physical volumes into a volume group. -3. Create logical volumes in the volume group. -4. Create file systems on logical volumes. - -When disks are managed using LVM, file systems are distributed on multiple disks and can be easily resized as needed. Therefore, file system space will no longer be limited by disk capacities. - -### Basic Terms -- Physical media: refers to physical storage devices in the system, such as hard disks \(**/dev/hda** and **/dev/sda**\). 
It is the storage unit at the lowest layer of the storage system. - -- Physical volume \(PV\): refers to a disk partition or device \(such as a RAID\) that has the same logical functions as a disk partition. PVs are basic logical storage blocks of LVM. A PV contains a special label that is stored in the second 512-byte sector by default. It can also be stored in one of the first four sectors. A label contains the universal unique identifier \(UUID\) of the PV, size of the block device, and the storage location of LVM metadata in the device. - -- Volume group \(VG\): consists of PVs and shields the details of underlying PVs. You can create one or more logical volumes within a VG without considering detailed PV information. - -- Logical volume \(LV\): A VG cannot be used directly. It can be used only after being partitioned into LVs. LVs can be formatted into different file systems and can be directly used after being mounted. - -- Physical extent \(PE\): A PE is a small storage unit in a PV. The PE size is the same as the size of the logical extent in the VG. - -- Logical extent \(LE\): An LE is a small storage unit in an LV. In one VG, the LEs of all the LVs have the same size. - -## Installing the LVM - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The LVM has been installed on the openEuler OS by default. You can run the **rpm -qa | grep lvm2** command to check whether it is installed. If the command output contains "lvm2", the LVM has been installed. In this case, skip this section. If no information is output, the LVM is not installed. Install it by referring to this section. - -1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Install the LVM as the **root** user. - - ``` - # dnf install lvm2 - ``` - -5. Check the installed RPM package. 
- - ``` - $ rpm -qa | grep lvm2 - ``` - - -## Managing PVs - -### Creating a PV -Run the **pvcreate** command as the **root** user to create a PV. - -``` -pvcreate [option] devname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-f**: forcibly creates a PV without user confirmation. - - **-u**: specifies the UUID of the device. - - **-y**: answers yes to all questions. - -- _devname_: specifies the name of the device corresponding to the PV to be created. If multiple PVs need to be created in batches, set this option to multiple device names and separate the names with spaces. - -Example 1: Create PVs based on **/dev/sdb** and **/dev/sdc**. - -``` -# pvcreate /dev/sdb /dev/sdc -``` - -Example 2: Create PVs based on **/dev/sdb1** and **/dev/sdb2**. - -``` -# pvcreate /dev/sdb1 /dev/sdb2 -``` - -### Viewing a PV -Run the **pvdisplay** command as the **root** user to view PV information, including PV name, VG to which the PV belongs, PV size, PE size, total number of PEs, number of available PEs, number of allocated PEs, and UUID. - -``` -pvdisplay [option] devname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-s**: outputs information in short format. - - **-m**: displays the mapping from PEs to LEs. - -- _devname_: indicates the device corresponding to the PV to be viewed. If no PVs are specified, information about all PVs is displayed. - -Example: Run the following command to display the basic information about the PV **/dev/sdb**: - -``` -# pvdisplay /dev/sdb -``` - -### Modifying PV Attributes -Run the **pvchange** command as the **root** user to modify the attributes of a PV. - -``` -pvchange [option] pvname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-u**: generates a new UUID. 
- - **-x**: indicates whether PE allocation is allowed. - -- _pvname_: specifies the name of the device corresponding to the PV to be modified. If multiple PVs need to be modified in batches, set this option to multiple device names and separate the names with spaces. - -Example: Run the following command to prohibit PEs on the PV **/dev/sdb** from being allocated. - -``` -# pvchange -x n /dev/sdb -``` - -### Deleting a PV -Run the **pvremove** command as the **root** user to delete a PV. - -``` -pvremove [option] pvname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-f**: forcibly deletes a PV without user confirmation. - - **-y**: answers yes to all questions. - -- _pvname_: specifies the name of the device corresponding to the PV to be deleted. If multiple PVs need to be deleted in batches, set this option to multiple device names and separate the names with spaces. - -Example: Run the following command to delete the PV **/dev/sdb**: - -``` -# pvremove /dev/sdb -``` - -## Managing VGs - -### Creating a VG -Run the **vgcreate** command as the **root** user to create a VG. - -``` -vgcreate [option] vgname pvname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-l**: specifies the maximum number of LVs that can be created on the VG. - - **-p**: specifies the maximum number of PVs that can be added to the VG. - - **-s**: specifies the PE size of a PV in the VG. - -- _vgname_: name of the VG to be created. -- _pvname_: name of the PV to be added to the VG. - -Example: Run the following command to create VG **vg1** and add the PVs **/dev/sdb** and **/dev/sdc** to the VG. - -``` -# vgcreate vg1 /dev/sdb /dev/sdc -``` - -### Viewing a VG -Run the **vgdisplay** command as the **root** user to view VG information. 
- -``` -vgdisplay [option] [vgname] -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-s**: outputs information in short format. - - **-A**: displays only attributes of active VGs. - -- _vgname_: name of the VG to be viewed. If no VGs are specified, information about all VGs is displayed. - -Example: Run the following command to display the basic information about VG **vg1**: - -``` -# vgdisplay vg1 -``` - -### Modifying VG Attributes -Run the **vgchange** command as the **root** user to modify the attributes of a VG. - -``` -vgchange [option] vgname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-a**: sets the active status of the VG. - -- _vgname_: name of the VG whose attributes are to be modified. - -Example: Run the following command to change the status of **vg1** to active. - -``` -# vgchange -ay vg1 -``` - -### Extending a VG -Run the **vgextend** command as the **root** user to dynamically extend a VG. In this way, the VG size is extended by adding PVs to the VG. - -``` -vgextend [option] vgname pvname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **dev**: debugging mode. - - **-t**: test only. - -- _vgname_: name of the VG whose size is to be extended. -- _pvname_: name of the PV to be added to the VG. - -Example: Run the following command to add PV **/dev/sdb** to VG **vg1**: - -``` -# vgextend vg1 /dev/sdb -``` - -### Shrinking a VG -Run the **vgreduce** command as the **root** user to delete PVs from a VG to reduce the VG size. A VG must contain at least one PV. - -``` -vgreduce [option] vgname pvname ... -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-a**: If no PVs are specified in the command, all empty PVs are deleted. 
- - **\-\-removemissing**: deletes lost PVs in the VG to restore the VG to the normal state. - -- _vgname_: name of the VG to be shrunk. -- _pvname_: name of the PV to be deleted from the VG. - -Example: Run the following command to remove PV **/dev/sdb2** from VG **vg1**: - -``` -# vgreduce vg1 /dev/sdb2 -``` - -### Deleting a VG -Run the **vgremove** command as the **root** user to delete a VG. - -``` -vgremove [option] vgname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-f**: forcibly deletes a VG without user confirmation. - -- _vgname_: name of the VG to be deleted. - -Example: Run the following command to delete VG **vg1**. - -``` -# vgremove vg1 -``` - -## Managing LVs - -### Creating an LV -Run the **lvcreate** command as the **root** user to create an LV. - -``` -lvcreate [option] vgname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-L**: specifies the size of the LV in kKmMgGtT. - - **-l**: specifies the size of the LV \(number of LEs\). - - **-n**: specifies the name of the LV to be created. - - **-s**: creates a snapshot. - -- _vgname_: name of the VG to be created. - -Example 1: Run the following command to create a 10 GB LV in VG **vg1**. - -``` -# lvcreate -L 10G vg1 -``` - -Example 2: Run the following command to create a 200 MB LV in VG **vg1** and name the LV **lv1**. - -``` -# lvcreate -L 200M -n lv1 vg1 -``` - -### Viewing an LV -Run the **lvdisplay** command as the **root** user to view the LV information, including the size of the LV, its read and write status, and snapshot information. - -``` -lvdisplay [option] [lvname] -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - -- **-v**: displays the mapping from LEs to PEs. - -- _lvname_: device file corresponding to the LV whose attributes are to be displayed. 
If this option is not set, attributes of all LVs are displayed. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Device files corresponding to LVs are stored in the VG directory. For example, if LV **lv1** is created in VG **vg1**, the device file corresponding to **lv1** is **/dev/vg1/lv1**. - - -Example: Run the following command to display the basic information about LV **lv1**: - -``` -# lvdisplay /dev/vg1/lv1 -``` - -### Adjusting the LV Size -Run the **lvresize** command as the **root** user to increase or reduce the size of an LVM LV. This may cause data loss. Therefore, exercise caution when running this command. - -``` -lvresize [option] vgname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-L**: specifies the size of the LV in kKmMgGtT. - - **-l**: specifies the size of the LV \(number of LEs\). - - **-f**: forcibly adjusts the size of the LV without user confirmation. - -- _lvname_: name of the LV to be adjusted. - -Example 1: Run the following command to increase the size of LV **/dev/vg1/lv1** by 200 MB. - -``` -# lvresize -L +200 /dev/vg1/lv1 -``` - -Example 2: Run the following command to reduce the size of LV **/dev/vg1/lv1** by 200 MB. - -``` -# lvresize -L -200 /dev/vg1/lv1 -``` - -### Extending an LV -Run the **lvextend** command as the **root** user to dynamically extend the size of an LV online without interrupting the access of applications to the LV. - -``` -lvextend [option] lvname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-L**: specifies the size of the LV in kKmMgGtT. - - **-l**: specifies the size of the LV \(number of LEs\). - - **-f**: forcibly adjusts the size of the LV without user confirmation. - -- _lvname_: device file of the LV whose size is to be extended. - -Example: Run the following command to increase the size of LV **/dev/vg1/lv1** by 100 MB. 
- -``` -# lvextend -L +100M /dev/vg1/lv1 -``` - -### Shrinking an LV -Run the **lvreduce** command as the **root** user to reduce the size of an LV. This may delete existing data on the LV. Therefore, confirm whether the data can be deleted before running the command. - -``` -lvreduce [option] lvname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-L**: specifies the size of the LV in kKmMgGtT. - - **-l**: specifies the size of the LV \(number of LEs\). - - **-f**: forcibly adjusts the size of the LV without user confirmation. - -- _lvname_: device file of the LV whose size is to be extended. - -Example: Run the following command to reduce the space of LV **/dev/vg1/lvl** by 100 MB: - -``` -# lvreduce -L -100M /dev/vg1/lv1 -``` - -### Deleting an LV -Run the **lvremove** command as the **root** user to delete an LV. If the LV has been mounted by running the **mount** command, you need to run the **umount** command to unmount the LV before running the **lvremove** command. - -``` -lvremove [option] vgname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-f**: forcibly deletes an LV without user confirmation. - -- _vgname_: name of the LV to be deleted. - -Example: Run the following command to delete LV **/dev/vg1/lv1**. - -``` -# lvremove /dev/vg1/lv1 -``` - -## Creating and Mounting a File System - -After creating an LV, you need to create a file system on the LV and mount the file system to the corresponding directory. - -### Creating a File System -Run the **mkfs** command as the **root** user to create a file system. - -``` -mkfs [option] lvname -``` - -In the preceding information: - -- _option_: command parameter options. Common parameter options are as follows: - - **-t**: specifies the type of the Linux system to be created, such as **ext2**, **ext3**, and **ext4**. The default type is **ext2**. 
- -- _lvname_: name of the LV device file corresponding to the file system to be created. - -Example: Run the following command to create the **ext4** file system on LV **/dev/vg1/lv1**: - -``` -# mkfs -t ext4 /dev/vg1/lv1 -``` - -### Manually Mounting a File System -The file system that is manually mounted is not valid permanently. It does not exist after the OS is restarted. - -Run the **mount** command as the **root** user to mount a file system. - -``` -mount lvname mntpath -``` - -In the preceding information: - -- _lvname_: name of the LV device file corresponding to the file system to be mounted. -- _mntpath_: mount path. - -Example: Run the following command to mount LV **/dev/vg1/lv1** to the directory **/mnt/data**. - -``` -# mount /dev/vg1/lv1 /mnt/data -``` - -### Automatically Mounting a File System -A file system that is automatically mounted does not exist after the OS is restarted. You need to manually mount the file system again. If you perform the following steps as the **root** user after manually mounting the file system, the file system can be automatically mounted after the OS is restarted. - -1. Run the **blkid** command to query the UUID of an LV. The following uses LV **/dev/vg1/lv1** as an example: - - ``` - # blkid /dev/vg1/lv1 - ``` - - Check the command output. It contains the following information in which _uuidnumber_ is a string of digits, indicating the UUID, and _fstype_ indicates the file system type. - - /dev/vg1/lv1: UUID=" _uuidnumber_ " TYPE=" _fstype_ " - -2. Run the **vi /etc/fstab** command to edit the **fstab** file and add the following content to the end of the file: - - ``` - UUID=uuidnumber mntpath fstype defaults 0 0 - ``` - - In the preceding information: - - - Column 1: indicates the UUID. Enter _uuidnumber_ obtained in [1](#li65701520154311). - - Column 2: indicates the mount directory of the file system. Replace _mntpath_ with the actual value. - - Column 3: indicates the file system format. 
Enter _fstype_ obtained in [1](#li65701520154311). - - Column 4: indicates the mount option. In this example, **defaults** is used. - - Column 5: indicates the backup option. Enter either **1** \(the system automatically backs up the file system\) or **0** \(the system does not back up the file system\). In this example, **0** is used. - - Column 6: indicates the scanning option. Enter either **1** \(the system automatically scans the file system during startup\) or **0** \(the system does not scan the file system\). In this example, **0** is used. - -3. Verify the automatic mounting function. - 1. Run the **umount** command to unmount the file system. The following uses LV **/dev/vg1/lv1** as an example: - - ``` - # umount /dev/vg1/lv1 - ``` - - 2. Run the following command to reload all content in the **/etc/fstab** file: - - ``` - # mount -a - ``` - - 3. Run the following command to query the file system mounting information \(**/mnt/data** is used as an example\): - - ``` - # mount | grep /mnt/data - ``` - - Check the command output. If the command output contains the following information, the automatic mounting function takes effect: - - /dev/vg1/lv1 on /mnt/data diff --git a/docs/en/docs/Administration/process-management.md b/docs/en/docs/Administration/process-management.md deleted file mode 100644 index 4032595e9628a73264e53976e363328e2a4cb716..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/process-management.md +++ /dev/null @@ -1,338 +0,0 @@ -# Process Management - -The operating system manages multiple user requests and tasks. In most cases, the operating system comes with only one CPU and one main memory, but it may have multiple tier-2 disks and input/output \(I/O\) devices. Therefore, users have to share resources, but it appears to users that they are exclusively occupying resources. 
The operating system places user tasks, OS tasks, emailing, print tasks, and other pending tasks in the queue and schedules the tasks according to predefined rules. In this topic, you will know how the operating system manages processes. - - -- [Process Management](#process-management) - - [Viewing Processes](#viewing-processes) - - [who Command](#who-command) - - [ps Command](#ps-command) - - [top Command](#top-command) - - [kill Command](#kill-command) - - [Scheduling a Process](#scheduling-a-process) - - [Using the at Command to Run Processes at the Scheduled Time](#using-the-at-command-to-run-processes-at-the-scheduled-time) - - [Using the cron Service to Run Commands Periodically](#using-the-cron-service-to-run-commands-periodically) - - [Suspending/Resuming a Process](#suspendingresuming-a-process) - - - -## Viewing Processes - -Linux is a multi-task system and needs to get process information during process management. To manage processes, you first need to know the number of processes and their statuses. Multiple commands are available to view processes. - -### who Command -The who command is used to display system user information. For example, before running the talk command to establish instant communication with another user, you need to run the who command to determine whether the target user is online. As another example, the system administrator can run the who command to learn what each login user is doing at the current time. The who command is widely seen in system administration since it is easy to use and can return a comprehensive set of accurate user information. 
- -The following is an example output of the who command, where system users and their status are displayed: The use of the **who** command is as follows: - -``` -$ who -admin tty1 Jul 28 15:55 -admin pts/0 Aug 5 15:46 (192.168.0.110) -admin pts/2 Jul 29 19:52 (192.168.0.110) -root pts/3 Jul 30 12:07 (192.168.0.110) -root pts/4 Jul 31 10:29 (192.168.0.144) -root pts/5 Jul 31 14:52 (192.168.0.11) -root pts/6 Aug 6 10:12 (192.168.0.234) -root pts/8 Aug 6 11:34 (192.168.0.234) -``` - -### ps Command -The **ps** command is the most basic and powerful command to view process information. The ps command is used to display process information, including which processes are running, terminated, resource-hungry, or stay as zombies. - -A common scenario is using the ps command to monitor background processes, which do not interact with your screen, keyboard, and other I/O devices. [Table 1](#en-us_topic_0151921029_t34619d964a3d41ad8694189ec383359c) lists the common ps command options. - -**Table 1** Common ps command options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Option

-

Description

-

-e

-

Displays all processes.

-

-f

-

Full output format.

-

-h

-

Hides column headings in the listing of process information.

-

-l

-

Long output format.

-

-w

-

Wide output format.

-

-a

-

Lists all processes on a terminal, including those of other users.

-

-r

-

Lists only running processes.

-

-x

-

Lists all processes without controlling terminals.

-
- -For example, to list all processes on a terminal, run the following command: - -``` -$ ps -a - PID TTY TIME CMD -12175 pts/6 00:00:00 bash -24526 pts/0 00:00:00 vsftpd -29478 pts/5 00:00:00 ps -32461 pts/0 1-01:58:33 sh -``` - -### top Command -Both the top and the ps commands can display a list of currently running processes, but the top command allows you to update the displayed list of processes repeatedly with the press of a button. If the top command is executed in foreground, it exclusively occupies foreground until it is terminated. The top command provides real-time visibility into system processor status. You can sort the list of CPU tasks by CPU usage, memory usage, or task execution time. Extensive customization of the display, such as choice of columns or sorting method, can be achieved using interactive commands or the customization file. - -[Figure 1](#en-us_topic_0151921029_f289234fcdbac453796200d80e9889cd1) provides an example output of the top command. - -**Figure 1** Example command output -![](./figures/example-command-output.png "example-command-output") - -### kill Command -The **kill** command is used to terminate a process regardless of whether the process is running in foreground or background. It differs from the combo key **Ctrl+c**, which can terminate only foreground processes. The kill command is used to terminate a process regardless of whether the process is running in foreground or background. The reason for terminating a background process can be heavy use of CPU resources or deadlock. - -The kill command sends a signal to terminate running processes. By default, the TERM signal is used. The TERM signal terminates all processes incapable of capturing the TERM signal. To terminate a process capable of capturing the TERM signal, use the KILL signal \(signal ID: 9\) instead. - -Two types of syntax of the kill command: - -``` -kill [-s signal | -p] [-a] PID… -kill -l [signal] -``` - -The process ID is retrieved from the ps command. 
The **-s** option indicates the signal sent to specified program. The signal details can be viewed by running the **kill -l** command. The **-p** option indicates the specified process IDs. - -For example, to terminate the process with ID 1409, run the following command as the **root** user: - -``` -# kill -9 1409 -``` - -Example output of the kill command with the -l option - -``` -$ kill -l - 1) SIGHUP 2) SIGINT 3) SIGQUIT 4) SIGILL 5) SIGTRAP - 6) SIGABRT 7) SIGBUS 8) SIGFPE 9) SIGKILL 10) SIGUSR1 -11) SIGSEGV 12) SIGUSR2 13) SIGPIPE 14) SIGALRM 15) SIGTERM -16) SIGSTKFLT 17) SIGCHLD 18) SIGCONT 19) SIGSTOP 20) SIGTSTP -21) SIGTTIN 22) SIGTTOU 23) SIGURG 24) SIGXCPU 25) SIGXFSZ -26) SIGVTALRM 27) SIGPROF 28) SIGWINCH 29) SIGIO 30) SIGPWR -31) SIGSYS 34) SIGRTMIN 35) SIGRTMIN+1 36) SIGRTMIN+2 37) SIGRTMIN+3 -38) SIGRTMIN+4 39) SIGRTMIN+5 40) SIGRTMIN+6 41) SIGRTMIN+7 42) SIGRTMIN+8 -43) SIGRTMIN+9 44) SIGRTMIN+10 45) SIGRTMIN+11 46) SIGRTMIN+12 47) SIGRTMIN+13 -48) SIGRTMIN+14 49) SIGRTMIN+15 50) SIGRTMAX-14 51) SIGRTMAX-13 52) SIGRTMAX-12 -53) SIGRTMAX-11 54) SIGRTMAX-10 55) SIGRTMAX-9 56) SIGRTMAX-8 57) SIGRTMAX-7 -58) SIGRTMAX-6 59) SIGRTMAX-5 60) SIGRTMAX-4 61) SIGRTMAX-3 62) SIGRTMAX-2 -63) SIGRTMAX-1 64) SIGRTMAX -``` - -## Scheduling a Process - -The time-consuming and resource-demanding part of maintenance work is often performed at late night. You can arrange relevant processes to get started at the scheduled time instead of staying up all night. Here, we will explain the process scheduling commands. - - -### Using the at Command to Run Processes at the Scheduled Time - -#### Function -The at command is used to run a batch of processes \(a series of commands\) at the scheduled time or time+date. - -Syntax of the at command: - -``` -at [-V] [-q queue] [-f filename] [-mldbv] time -at -c job [job...] 
-``` - -#### Time Format -The scheduled time can be in any of the following formats: - -- hh:mm today: If hh:mm is earlier than the current time, the selected commands will be run at hh:mm the next day. -- midnight, noon, teatime \(typically at 16:00\), or the like -- 12-hour format followed by am or pm -- Time + date \(month day, mm/dd/yy, or dd.mm.yy\) The scheduled date must follow the scheduled time. - -The scheduled time can also be relative time, which is suitable for scheduling commands that are going to be executed soon. For example, now+_N_ minutes, hours, days, or weeks. _N_ is time, which may be a few days or hours. Further, the scheduled time can be words like today, tomorrow, or the like. Here are some examples of the scheduled time. - -Imagine the current time is 12:30 June 7 2019 and you want to run a command at 4:30 pm. The scheduled time in the at command can be any of the following: - -``` - at 4:30pm - at 16:30 - at 16:30 today - at now+4 hours - at now+ 240 minutes - at 16:30 7.6.19 - at 16:30 6/7/19 - at 16:30 Jun 7 -``` - -Although you can select any of the preceding examples according to your preference, absolute time in 24-hour format, such as at 16:30 6/7/19, is recommended. - -#### Privileges -Only commands from standard input or from the file specified by the -f option can be scheduled by the at command to be executed. If the su command is executed to switch the operating system from user A to user B and then the at command is executed at the shell prompt of user B, the at command execution result is sent to user B. whereas emails \(if any\) are sent to user A. - -For example, to run the slocate -u command at 10 am on June 8, 2019, perform the following steps as the **root** user: - -``` -# at 10:00 6/8/19 -at> slocate -u -at> -[1]+ Stopped at 10:00 6/8/19 -``` - -When the at\> prompt appears, type **slocate -u** and press Enter. Repeat substep 2 to add other commands that need to be run at 10 am on 8 June 2015. 
Then, press Ctrl+d to exit the at command. - -The administrator is authorized to run the at command unconditionally. For other users, their privilege to run the at command is defined in /etc/at.allow and /etc/at.deny files. - -### Using the cron Service to Run Commands Periodically - -The at command can run commands at the scheduled time but only once. It means that after the running command is specified, the system completes the task at the specified time. If you need to run commands repeatedly, the cron service is a good helper. - -#### Cron Service -The **cron** service searches the **/var/spool/cron** directory for **crontab** files named by the user name in the /etc/passwd file and loads the search results into memory to execute the commands in the **crontab** files. Each user has a crontab file, with the file name being the same as the user name. For example, the **crontab** file of the **userexample** user is **/var/spool/cron/userexample**. - -The **cron** service also reads the cron configuration file **/etc/crontab** every minute, which can be edited in various formats. If no crontab files are found, the **cron** service enters sleep mode and releases system resources. One minute later, the **cron** service is awoken to repeat the search work and command execution. Therefore, the background process occupies few resources and is wakened up every minute to check whether there are commands to be executed. - -Command execution results are then mailed to users specified by the environment variable MAILTO in the /etc/crontab file. The **cron** service, once started, does not require manual intervention except when you need to replace periodic commands with new ones. - -#### crontab Command -The crontab command is used to install, edit, remove, list, and perform other operations on crontab files. Each user has its own crontab files and can add commands to be executed to the files. 
- -Here are common crontab command options: - -- crontab -u //Set the **cron** service of a user. This option is required only when the **crontab** command is run by the **root** user. -- crontab -l //List details of the **cron** service of a user. -- crontab -r //Remove the **cron** service of a user. -- crontab -e //Edit the **cron** service of a user. - -For example, to list cron service settings of the user **root**, run the following command: - -``` -# crontab -u root -l -``` - -#### crontab Files -Enter the commands to be executed and time in crontab files. Each line in the files contains six fields. The first five fields are the time when the specified command is executed, and the last field is the command to be executed. Fields are separated by spaces or tabs. The format is as follows: - -``` -minute hour day-of-month month-of-year day-of-week commands -``` - -Each field is described as follows: - -**Table 2** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

minute

-

The minute of the hour at which commands will be executed. Value range: 0–59.

-

hour

-

The hour of the day at which periodic commands will be executed. Value range: 0–23.

-

day-of-month

-

The day of month at which periodic commands will be executed. Value range: 1–31.

-

month-of-year

-

The month of year at which periodic commands will be executed. Value range: 1–12.

-

day-of-week

-

The day of week at which periodic commands will be executed. Value range: 0–6.

-

commands

-

Periodic commands.

-
- -The fields cannot be left unspecified. In addition to numerical values, the following special symbols are allowed: Asterisk \(\*\): a wildcard value. Forward slash \(/\): followed by a numeral N to indicate that commands will be executed at a regular interval of N. Hyphen \(-\): used with a range.Comma \(,\): used to separate discrete numbers. A complete path to the commands shall be provided. - -For example, to allow the operating system to add sleepy to the /tmp/test.txt file every two hours from 18 pm to 22 pm, add the following line in a crontab file: - -``` -* 18-22/2 * * * echo "sleepy" >> /tmp/test.txt -``` - -Each time the cron service settings of a user are edited, the cron service generates in the /var/spool/cron directory a crontab file named after the user. The crontab file can be edited only using the crontab -e command. Alternatively, the user can create a file and run the crontab _filename_ command to import its cron settings into the new file. - -For example, to create a crontab file for the userexample user, perform the following steps: The procedure is as follows: - -1. Create a file using any text editor. Add the commands that need to be executed periodically and the command execution interval to the new file. In this example, the new file is **\~/userexample.cron**. -2. Run the following command as the **root** user to install the new file as the crontab file of the userexample user: - - ``` - # crontab -u userexample ~/userexample.cron - ``` - - -After the new file is installed, you will find a file named userexample in the **/var/spool/cron** directory. This file is the required crontab file. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Do not restart the cron service after a crontab file is modified, because the cron service, once started, reads the crontab file every minute to check whether there are commands that need to be executed periodically. 
You do not need to restart the **cron** service after modifying the **crontab** file. - -#### /etc/crontab File -The **cron** service reads all files in the **/var/spool/cron** directory and the **crontab** file in the **/etc/crontab** directory every minute. Therefore, you can use the **cron** service by configuring the **crontab** file. A crontab file contains user-specific commands, whereas the **/etc/crontab** file contains system-wide commands. Example /etc/crontab file - -``` -SHELL=/bin/sh -PATH=/usr/bin:/usr/sbin:/sbin:/bin:/usr/lib/news/bin -MAILTO=root //If an error occurs or data is output, the data is sent to the account by email. -HOME=/ -# run-parts -01 * * * * root run-parts /etc/cron.hourly //Run scripts in the /etc/cron.hourly directory once an hour. -02 4 * * * root run-parts /etc/cron.daily //Run scripts in the /etc/cron.daily directory once a day. -22 4 * * 0 root run-parts /etc/cron.weekly //Run scripts in the /etc/cron.weekly directory once a week. -42 4 1 * * root run-parts /etc/cron.monthly //Run scripts in the /etc/cron.monthly directory once a month. -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the **run-parts** parameter is deleted, a script name instead of a directory name is executed. - -## Suspending/Resuming a Process - -A process can be suspended or resumed by job control, and the process will continue to work from the suspended point after being resumed. To suspend a foreground process, press Ctrl+Z. After you press Ctrl+Z, the cat command is suspended together with the foreground process you wish to suspend. You can use the jobs command instead to display a list of shell jobs, including their job names, IDs, and status. - -To resume a process in foreground or background, run the fg or bg command, respectively. The process then starts from where it paused previously. 
diff --git a/docs/en/docs/Administration/public_sys-resources/icon-caution.gif b/docs/en/docs/Administration/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Administration/public_sys-resources/icon-danger.gif b/docs/en/docs/Administration/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Administration/public_sys-resources/icon-note.gif b/docs/en/docs/Administration/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Administration/public_sys-resources/icon-notice.gif b/docs/en/docs/Administration/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Administration/public_sys-resources/icon-tip.gif b/docs/en/docs/Administration/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/Administration/public_sys-resources/icon-warning.gif b/docs/en/docs/Administration/public_sys-resources/icon-warning.gif deleted file mode 100644 index 
6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Administration/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Administration/service-management.md b/docs/en/docs/Administration/service-management.md deleted file mode 100644 index f7950c0494a2d0714b2cef3d08edd88b4e90a9a0..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/service-management.md +++ /dev/null @@ -1,817 +0,0 @@ -# Service Management - -This topic describes how to manage your operating system and services using the systemd. - - -- [Service Management](#service-management) - - [Introduction to systemd](#introduction-to-systemd) - - [Systemd Units](#systemd-units) - - [Features](#features) - - [Fast Activation](#fast-activation) - - [On-Demand Activation](#on-demand-activation) - - [Service Lifecycle Management by Cgroups](#service-lifecycle-management-by-cgroups) - - [Mount and Automount Point Management](#mount-and-automount-point-management) - - [Transactional Dependency Management](#transactional-dependency-management) - - [Compatibility with SysVinit Scripts](#compatibility-with-sysvinit-scripts) - - [System State Snapshots and System Restoration](#system-state-snapshots-and-system-restoration) - - [Managing System Services](#managing-system-services) - - [Comparison Between SysVinit and systemd Commands](#comparison-between-sysvinit-and-systemd-commands) - - [Listing Services](#listing-services) - - [Displaying Service Status](#displaying-service-status) - - [Starting a Service](#starting-a-service) - - [Stopping a Service](#stopping-a-service) - - [Restarting a Service](#restarting-a-service) - - [Enabling a Service](#enabling-a-service) - - [Disabling a Service](#disabling-a-service) - - [Changing a Runlevel](#changing-a-runlevel) - - [Targets and Runlevels](#targets-and-runlevels) - - [Viewing the Default Startup Target](#viewing-the-default-startup-target) - - [Viewing All 
Startup Targets](#viewing-all-startup-targets) - - [Changing the Default Target](#changing-the-default-target) - - [Changing the Current Target](#changing-the-current-target) - - [Changing to Rescue Mode](#changing-to-rescue-mode) - - [Changing to Emergency Mode](#changing-to-emergency-mode) - - [Shutting Down, Suspending, and Hibernating the Operating System](#shutting-down-suspending-and-hibernating-the-operating-system) - - [systemctl Command](#systemctl-command) - - [Shutting Down the Operating System](#shutting-down-the-operating-system) - - [Restarting the Operating System](#restarting-the-operating-system) - - [Suspending the Operating System](#suspending-the-operating-system) - - [Hibernating the Operating System](#hibernating-the-operating-system) - - - - -## Introduction to systemd - -The systemd is a system and service manager for Linux operating systems. It is designed to be backward compatible with SysV and LSB init scripts, and provides a number of features such as Socket & D-Bus based activation of services, on-demand activation of daemons, system state snapshots, and mount & automount point management. With systemd, the service control logic and parallelization are refined. - -### Systemd Units -In systemd, the targets of most actions are units, which are resources systemd know how to manage. Units are categorized by the type of resources they represent and defined in unit configuration files. For example, the avahi.service unit represents the Avahi daemon and is defined in the **avahi.service** file. [Table 1](#en-us_topic_0151921012_t2dcb6d973cc249ed9ccd56729751ca6b) lists available types of systemd units. - -**Table 1** Available types of systemd units - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Unit Type

-

File Extension

-

Description

-

Service unit

-

.service

-

A system service.

-

Target unit

-

.target

-

A group of systemd units.

-

Automount unit

-

.automount

-

A file system automount point.

-

Device unit

-

.device

-

A device file recognized by the kernel.

-

Mount unit

-

.mount

-

A file system mount point.

-

Path unit

-

.path

-

A file or directory in a file system.

-

Scope unit

-

.scope

-

An externally created process.

-

Slice unit

-

.slice

-

A group of hierarchically organized units that manage system processes.

-

Socket unit

-

.socket

-

An inter-process communication socket.

-

Swap unit

-

.swap

-

A swap device or a swap file.

-

Timer unit

-

.timer

-

A systemd timer.

-
- -All available types of systemd units are located in one of the following directories listed in [Table 2](#en-us_topic_0151921012_t2523a0a9a0c54f9b849e52d1efa0160c). - -**Table 2** Locations of available systemd units - - - - - - - - - - - - - - - - -

Directory

-

Description

-

/usr/lib/systemd/system/

-

Systemd units distributed with installed RPM packages.

-

/run/systemd/system/

-

Systemd units created at runtime.

-

/etc/systemd/system/

-

Systemd units created and managed by the system administrator.

-
- -## Features - -### Fast Activation -The systemd provides more aggressive parallelization than UpStart. The use of Socket- and D-Bus based activation reduces the time required to boot the operating system. - -To accelerate system boot, systemd seeks to: - -- Activate only the necessary processes -- Activate as many processes as possible in parallel - -### On-Demand Activation -During SysVinit initialization, it activates all the possible background service processes that might be used. Users can log in only after all these service processes are activated. The drawbacks in SysVinit are obvious: slow system boot and a waste of system resources. - -Some services may rarely or even never be used during system runtime. For example, CUPS, printing services are rarely used on most servers. SSHD is rarely accessed on many servers. It is unnecessary to spend time on starting these services and system resources. - -systemd can only be activated when a service is requested. If the service request is over, systemd stops. - -### Service Lifecycle Management by Cgroups -An important role of an init system is to track and manage the lifecycle of services. It can start and stop a service. However, it is more difficult than you could ever imagine to encode an init system into stopping services. - -Service processes often run in background as daemons and sometimes fork twice. In UpStart, the expect stanza in the configuration file must be correctly configured. Otherwise, UpStart is unable to learn a daemon's PID by counting the number of forks. - -Things are made simpler with Cgroups, which have long been used to manage system resource quotas. The ease of use comes largely from its file-system-like user interface. When a parent service creates a child service, the latter inherits all attributes of the Cgroup to which the parent service belongs. This means that all relevant services are put into the same Cgroup. 
The systemd can find the PIDs of all relevant services simply by traversing their control group and then stop them one by one. - -### Mount and Automount Point Management -In traditional Linux systems, users can use the **/etc/fstab** file to maintain fixed file system mount points. These mount points are automatically mounted during system startup. Once the startup is complete, these mount points are available. These mount points are file systems critical to system running, such as the **HOME** directory. Like SysVinit, systemd manages these mount points so that they can be automatically mounted at system startup. systemd is also compatible with the **/etc/fstab** file. You can continue to use this file to manage mount points. - -There are times when you need to mount or unmount on demand. For example, a temporary mounting point is required for you to access the DVD content, and the mounting point is canceled \(using the **umount** command\) if you no longer need to access the content, thereby saving resources. This is traditionally achieved using the autofs service. - -The systemd allows automatic mount without a need to install autofs. - -### Transactional Dependency Management -System boot involves a host of separate jobs, some of which may be dependent on each other. For example, a network file system \(NFS\) can be mounted only after network connectivity is activated. The systemd can run a large number of dependent jobs in parallel, but not all of them. Looking back to the NFS example, it is impossible to mount NFS and activate network at the same time. Before running a job, systemd calculates its dependencies, creates a temporary transaction, and verifies that this transaction is consistent \(all relevant services can be activated without any dependency on each other\). - -### Compatibility with SysVinit Scripts -Like UpStart, systemd introduces new configuration methods and has new requirements for application development. 
If you want to replace the currently running initialization system with systemd, systemd must be compatible with the existing program. It is difficult to modify all the service code in any Linux distribution in a short time for the purpose of using systemd. - -The systemd provides features compatible with SysVinit and LSB initscripts. You do not need to modify the existing services and processes in the system. This reduces the cost of migrating the system to systemd, making it possible for users to replace the existing initialization system with systemd. - -### System State Snapshots and System Restoration -The systemd can be started on demand. Therefore, the running status of the system changes dynamically, and you cannot know the specific services that are running in the system. systemd snapshots enable the current system running status to be saved and restored. - -For example, if services A and B are running in the system, you can run the **systemd** command to create a snapshot for the current system running status. Then stop process A or make any other change to the system, for example, starting process C. After these changes, run the snapshot restoration command of systemd to restore the system to the point at which the snapshot was taken. That is, only services A and B are running. A possible application scenario is debugging. For example, when an exception occurs on the server, a user saves the current status as a snapshot for debugging, and then perform any operation, for example, stopping the service. After the debugging is complete, restore the snapshot. - -## Managing System Services - -The systemd provides the systemctl command to start, stop, restart, view, enable, and disable system services. - -### Comparison Between SysVinit and systemd Commands -The **systemctl** command from the **systemd** command has the functions similar to the **SysVinit** command. Note that the **service** and **chkconfig** commands are supported in this version. 
For details, see [Table 3](#en-us_topic_0151920917_ta7039963b0c74b909b72c22cbc9f2e28). You are advised to manage system services by running the **systemctl** command. - -**Table 3** Comparison between SysVinit and systemd commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

SysVinit Command

-

systemd Command

-

Description

-

service network start

-

systemctl start network.service

-

Starts a service.

-

service network stop

-

systemctl stop network.service

-

Stops a service.

-

service network restart

-

systemctl restart network.service

-

Restarts a service.

-

service network reload

-

systemctl reload network.service

-

Reloads a configuration file without interrupting an operation.

-

service network condrestart

-

systemctl condrestart network.service

-

Restarts a service only if it is running.

-

service network status

-

systemctl status network.service

-

Checks the service running status.

-

chkconfig network on

-

systemctl enable network.service

-

Enables a service so that it is started automatically at boot time or when its activation trigger condition is met.

-

chkconfig network off

-

systemctl disable network.service

-

Disables a service so that it is not started automatically at boot time or when its activation trigger condition is met.

-

chkconfig network

-

systemctl is-enabled network.service

-

Checks whether a service is enabled.

-

chkconfig \-\-list

-

systemctl list-unit-files \-\-type=service

-

Lists all services in each runlevel and checks whether they are enabled.

-

chkconfig network \-\-list

-

ls /etc/systemd/system/*.wants/network.service

-

Lists the runlevels in which a service is enabled and those in which the service is disabled.

-

chkconfig network \-\-add

-

systemctl daemon-reload

-

Used when you need to create a service file or change settings.

-
- -### Listing Services -To list all currently loaded services, run the following command: - -``` -systemctl list-units --type service -``` - -To list all services regardless of whether they are loaded, run the following command \(with the all option\): - -``` -systemctl list-units --type service --all -``` - -Example list of all currently loaded services: - -``` -$ systemctl list-units --type service -UNIT LOAD ACTIVE SUB JOB DESCRIPTION -atd.service loaded active running Deferred execution scheduler -auditd.service loaded active running Security Auditing Service -avahi-daemon.service loaded active running Avahi mDNS/DNS-SD Stack -chronyd.service loaded active running NTP client/server -crond.service loaded active running Command Scheduler -dbus.service loaded active running D-Bus System Message Bus -dracut-shutdown.service loaded active exited Restore /run/initramfs on shutdown -firewalld.service loaded active running firewalld - dynamic firewall daemon -getty@tty1.service loaded active running Getty on tty1 -gssproxy.service loaded active running GSSAPI Proxy Daemon -irqbalance.service loaded active running irqbalance daemon -iscsid.service loaded activating start start Open-iSCSI -``` - -### Displaying Service Status -To display the status of a service, run the following command: - -``` -systemctl status name.service -``` - -[Table 4](#en-us_topic_0151920917_t36cd267d69244ed39ae06bb117ed8e62) describes the parameters in the command output. - -**Table 4** Output parameters - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Loaded

-

Information on whether the service has been loaded, the absolute path to the service file, and a note of whether the service is enabled.

-

Active

-

Information on whether the service is running and a time stamp.

-

Main PID

-

PID of the service.

-

CGroup

-

Additional information about related control groups.

-
- -To verify whether a particular service is running, run the following command: - -``` -systemctl is-active name.service -``` - -The output of the **is-active** command is as follows: - -**Table 5** Output of the is-active command - - - - - - - - - - - - - - - - - - - -

Status

-

Description

-

active(running)

-

One or more services are running in the system.

-

active(exited)

-

A service that ran once and exited normally; no process for it remains running. For example, the quotaon service runs only once, when the system starts or when file systems are mounted.

-

active(waiting)

-

The service is running but must wait for another event before it can proceed. For example, a print queue service that has started waits for a queued print job before it wakes the printer to perform the next print operation.

-

inactive

-

The service is not running.

-
- -Similarly, to determine whether a particular service is enabled, run the following command: - -``` -systemctl is-enabled name.service -``` - -The output of the **is-enabled** command is as follows: - -**Table 6** Output of the is-enabled command - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Status

-

Description

-

"enabled"

-

Has been permanently enabled through an Alias= alias, or a .wants/ or .requires/ symlink, in the /etc/systemd/system/ directory.

-

"enabled-runtime"

-

Has been temporarily enabled through an Alias= alias, or a .wants/ or .requires/ symlink, in the /run/systemd/system/ directory.

-

"linked"

-

Although the unit file is not in the standard unit directory, one or more soft links pointing to the unit file exist in the /etc/systemd/system/ permanent directory.

-

"linked-runtime"

-

Although the unit file is not in the standard unit directory, one or more soft links pointing to the unit file exist in the /run/systemd/system/ temporary directory.

-

"masked"

-

Has been masked permanently by the /etc/systemd/system/ directory (soft link to /dev/null). Therefore, the start operation fails.

-

"masked-runtime"

-

Has been masked temporarily by the /run/systemd/system/ directory (soft link to /dev/null). Therefore, the start operation fails.

-

"static"

-

Not enabled. There is no option available for the enable command in the [Install] section of the unit file.

-

"indirect"

-

Not enabled. But the list of values for the Also= option in the [Install] section of the unit file is not empty (that is, some units in the list may have been enabled), or the unit file has an alias soft link which is not in the Also= list. For a template unit, it indicates that an instance different from DefaultInstance= is enabled.

-

"disabled"

-

Not enabled. But the [Install] section of the unit file contains options available for the enable command.

-

"generated"

-

The unit file is dynamically generated by the unit generator. The generated unit file may not be directly enabled, but is implicitly enabled by the unit generator.

-

"transient"

-

The unit file is dynamically and temporarily generated by the runtime API. The temporary unit may not be enabled.

-

"bad"

-

The unit file is incorrect or other errors occur. is-enabled does not return this status, but displays an error message. The list-unit-files command may display this unit.

-
- -For example, to display the status of gdm.service, run the **systemctl status gdm.service** command. - -``` -# systemctl status gdm.service -gdm.service - GNOME Display Manager Loaded: loaded (/usr/lib/systemd/system/gdm.service; enabled) Active: active (running) since Thu 2013-10-17 17:31:23 CEST; 5min ago - Main PID: 1029 (gdm) - CGroup: /system.slice/gdm.service - ├─1029 /usr/sbin/gdm - ├─1037 /usr/libexec/gdm-simple-slave --display-id /org/gno... - └─1047 /usr/bin/Xorg :0 -background none -verbose -auth /r...Oct 17 17:31:23 localhost systemd[1]: Started GNOME Display Manager. -``` - -### Starting a Service -To start a service, run the following command as the user **root**: - -``` -systemctl start name.service -``` - -For example, to start the httpd service, run the following command: - -``` -# systemctl start httpd.service -``` - -### Stopping a Service -To stop a service, run the following command as the user **root**: - -``` -systemctl stop name.service -``` - -For example, to stop the Bluetooth service, run the following command: - -``` -# systemctl stop bluetooth.service -``` - -### Restarting a Service -To restart a service, run the following command as the user **root**: - -``` -systemctl restart name.service -``` - -This command stops the selected service in the current session and immediately starts it again. If the selected service is not running, this command starts it too. 
- -For example, to restart the Bluetooth service, run the following command: - -``` -# systemctl restart bluetooth.service -``` - -### Enabling a Service -To configure a service to start automatically at system boot time, run the following command as the user **root**: - -``` -systemctl enable name.service -``` - -For example, to configure the httpd service to start automatically at system boot time, run the following command: - -``` -# systemctl enable httpd.service -ln -s '/usr/lib/systemd/system/httpd.service' '/etc/systemd/system/multi-user.target.wants/httpd.service' -``` - -### Disabling a Service -To prevent a service from starting automatically at system boot time, run the following command as the user **root**: - -``` -systemctl disable name.service -``` - -For example, to prevent the Bluetooth service from starting automatically at system boot time, run the following command: - -``` -# systemctl disable bluetooth.service -Removed /etc/systemd/system/bluetooth.target.wants/bluetooth.service. -Removed /etc/systemd/system/dbus-org.bluez.service. -``` - -## Changing a Runlevel - -### Targets and Runlevels -In systemd, the concept of runlevels has been replaced with systemd targets to improve flexibility. For example, you can inherit an existing target and turn it into your own target by adding other services. [Table 7](#en-us_topic_0151920939_t9af92c282ad240ea9a79fb08d26e8181) provides a complete list of runlevels and their corresponding systemd targets. - -**Table 7** Mapping between runlevels and targets - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Runlevel

-

systemd Target

-

Description

-

0

-

runlevel0.target, poweroff.target

-

The operating system is powered off.

-

1, s, single

-

runlevel1.target, rescue.target

-

The operating system is in single user mode.

-

2, 4

-

runlevel2.target, runlevel4.target, multi-user.target

-

The operating system is in a user-defined or domain-specific runlevel (by default, it is equivalent to runlevel 3).

-

3

-

runlevel3.target, multi-user.target

-

The operating system is in non-graphical multi-user mode, and can be accessed from multiple consoles or networks.

-

5

-

runlevel5.target, graphical.target

-

The operating system is in graphical multi-user mode. All the services running at level 3 can be accessed through graphical login.

-

6

-

runlevel6.target, reboot.target

-

The operating system is rebooted.

-

emergency

-

emergency.target

-

Emergency shell.

-
- -### Viewing the Default Startup Target -Run the following command to view the default startup target of the system: - -``` -systemctl get-default -``` - -### Viewing All Startup Targets -Run the following command to view all startup targets of the system: - -``` -systemctl list-units --type=target -``` - -### Changing the Default Target -To change the default target, run the following command as the user **root**: - -``` -systemctl set-default name.target -``` - -### Changing the Current Target -To change the current target, run the following command as the user **root**: - -``` -systemctl isolate name.target -``` - -### Changing to Rescue Mode -To change the operating system to rescue mode, run the following command as the user **root**: - -``` -systemctl rescue -``` - -This command is similar to the **systemctl isolate rescue.target** command. After the command is executed, the following information is displayed on the serial port: - -``` -You are in rescue mode. After logging in, type "journalctl -xb" to viewsystem logs, "systemctl reboot" to reboot, "systemctl default" or "exit"to boot into default mode. -Give root password for maintenance -(or press Control-D to continue): -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->You need to restart the system to enter the normal working mode from the rescue mode. - -### Changing to Emergency Mode -To change the operating system to emergency mode, run the following command as the user **root**: - -``` -systemctl emergency -``` - -This command is similar to the **systemctl isolate emergency.target** command. After the command is executed, the following information is displayed on the serial port: - -``` -You are in emergency mode. After logging in, type "journalctl -xb" to viewsystem logs, "systemctl reboot" to reboot, "systemctl default" or "exit"to boot into default mode. 
-Give root password for maintenance -(or press Control-D to continue): -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->You need to restart the system to enter the normal working mode from the emergency mode. - -## Shutting Down, Suspending, and Hibernating the Operating System - -### systemctl Command -The systemd uses the systemctl command instead of old Linux system management commands to shut down, restart, suspend, and hibernate the operating system. Although previous Linux system management commands are still available in systemd for compatibility reasons, you are advised to use **systemctl** when possible. The mapping relationship is shown in [Table 8](#en-us_topic_0151920964_t3daaaba6a03b4c36be9668efcdb61f3b). - -**Table 8** Mapping between old Linux system management commands and systemctl - - - - - - - - - - - - - - - - - - - - -

Linux Management Command

-

systemctl Command

-

Description

-

halt

-

systemctl halt

-

Shuts down the operating system.

-

poweroff

-

systemctl poweroff

-

Powers off the operating system.

-

reboot

-

systemctl reboot

-

Reboots the operating system.

-
- -### Shutting Down the Operating System -To shut down the system and power off the operating system, run the following command as the user **root**: - -``` -systemctl poweroff -``` - -To shut down the operating system without powering it off, run the following command as the user **root**: - -``` -systemctl halt -``` - -By default, running either of these commands causes systemd to send an informative message to all login users. To prevent systemd from sending this message, run this command with the **\-\-no\-wall** option. The command is as follows: - -``` -systemctl --no-wall poweroff -``` - -### Restarting the Operating System -To restart the operating system, run the following command as the user **root**: - -``` -systemctl reboot -``` - -By default, running either of these commands causes systemd to send an informative message to all login users. To prevent systemd from sending this message, run this command with the **\-\-no\-wall** option. The command is as follows: - -``` -systemctl --no-wall reboot -``` - -### Suspending the Operating System -To suspend the operating system, run the following command as the user **root**: - -``` -systemctl suspend -``` - -### Hibernating the Operating System -To hibernate the operating system, run the following command as the user **root**: - -``` -systemctl hibernate -``` - -To suspend and hibernate the operating system, run the following command as the user **root**: - -``` -systemctl hybrid-sleep -``` diff --git a/docs/en/docs/Administration/setting-up-the-database-server.md b/docs/en/docs/Administration/setting-up-the-database-server.md deleted file mode 100644 index 000f8f03a830276fcab208747b8f059e79915562..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/setting-up-the-database-server.md +++ /dev/null @@ -1,2182 +0,0 @@ -# Setting Up the Database Server - - -- [Setting Up the Database Server](#setting-up-the-database-server) - - [PostgreSQL Server](#postgresql-server) - - [Software 
Description](#software-description) - - [Configuring the Environment](#configuring-the-environment) - - [Installing, Running, and Uninstalling PostgreSQL](#installing-running-and-uninstalling-postgresql) - - [Managing Database Roles](#managing-database-roles) - - [Managing Databases](#managing-databases) - - [MariaDB Server](#mariadb-server) - - [Software Description](#software-description-1) - - [Configuring the Environment](#configuring-the-environment-1) - - [Installing, Running, and Uninstalling MariaDB Server](#installing-running-and-uninstalling-mariadb-server) - - [Managing Database Users](#managing-database-users) - - [Managing Databases](#managing-databases-1) - - [MySQL Server](#mysql-server) - - [Software Description](#software-description-2) - - [Configuring the Environment](#configuring-the-environment-2) - - [Installing, Running, and Uninstalling MySQL](#installing-running-and-uninstalling-mysql) - - [Managing Database Users](#managing-database-users-1) - - [Managing Databases](#managing-databases-2) - - - -## PostgreSQL Server - - - -### Software Description - -[Figure 1](#fig26022387391) shows the PostgreSQL architecture and [Table 1](#table62020913417) describes the main processes. - -**Figure 1** PostgreSQL architecture -![](./figures/postgresql-architecture.png "postgresql-architecture") - -**Table 1** Main processes in PostgreSQL - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Process Type

-

Process Name

-

Description

-

Main process

-

Postmaster

-

Postmaster process controls all database instances in general and is responsible for starting and stopping database instances.

-

Resident process

-

Postgres (resident process)

-

This process manages backend resident processes and is also called postmaster. By default, this process listens on Unix domain sockets and TCP/IP port 5432 and waits for the front end to process the connections. You can change the listening port number in the postgresql.conf file of PostgreSQL.

-

Subprocess

-

Postgres (subprocess)

-

The subprocess determines whether to allow the connection according to the security policy defined by the pg_hba.conf file. According to the security policy, the subprocess rejects certain IP addresses and networks, allows only certain users to connect to the databases, or allows only certain databases to be connected.

-

Postgres receives the query from the front end, searches the database, and returns the results. Sometimes, it also updates the database. The updated data is recorded in transaction logs (WAL logs for PostgreSQL). This method is used when the system is powered off, the server breaks down, or the server is restarted. In addition, the logs can also be used for data recovery in other scenarios. In PostgreSQL 9.0 or later, WAL logs can be transferred to other PostgreSQL systems to replicate databases in real time.

-

Auxiliary processes

-

SysLogger (system log)

-

The main process starts the SysLogger auxiliary process only when logging_collector in the postgresql.conf file is set to on.

-

BgWriter (background write)

-

This process writes dirty pages from the shared memory to the drive. The purpose is to improve the performance of inserting, updating, and deleting data.

-

WALWriter (write-ahead log)

-

This process writes modification operations into drives before data is modified so that the data does not need to be persisted into files in subsequent real-time data updates.

-

PgArch (archive)

-

Write-ahead logs (WALs) are recycled. The PgArch process backs up WALs before archiving them. After the entire database is backed up, the Point in Time Recovery (PITR) technology can be used to archive WALs. The database can be restored to any point after the full backup by using the full backup data and the subsequently archived WALs.

-

AutoVacuum (automatic cleanup)

-

In the PostgreSQL database, after a DELETE operation is performed on a table, old data is not immediately deleted. When new data is added, the system creates a data row instead of overwriting the old data. The old data is only marked as deleted and will be cleared only when no other concurrent transactions are reading the data. In this case, the data is cleared by the AutoVacuum process.

-

PgStat (statistics collection)

-

This process collects data statistics. It is used to estimate the cost during query optimization, including the number of insertion, update, and deletion operations performed on a table or index, the number of drive block read and write operations, and the number of row read operations. pg_statistic stores the information collected by the PgStat.

-

CheckPoint (checkpoint)

-

A checkpoint is a transaction sequence point set by the system. It is used to ensure that log information before a checkpoint is written into the drives.

-
- -### Configuring the Environment - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The following environment configuration is for reference only. Configure the environment based on the site requirements. - - - -#### Disabling the Firewall and Automatic Startup - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->It is recommended that firewall be disabled in the test environment to prevent network impact. Configure the firewall based on actual requirements. - -1. Stop the firewall service as the **root** user. - - ``` - # systemctl stop firewalld - ``` - -2. Disable the firewall service as the **root** user. - - ``` - # systemctl disable firewalld - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The automatic startup is automatically disabled as the firewall is disabled. - - -#### Disabling SELinux - -1. Modify the configuration file as the **root** user. - - ``` - # sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux - ``` - - -#### Creating a User Group and a User - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->In the server environment, independent users are assigned to each process to implement permission isolation for security purposes. The user group and user are created for the OS, not for the database. - -1. Create a PostgreSQL user or user group as the **root** user. - - ``` - # groupadd postgres - ``` - - ``` - # useradd -g postgres postgres - ``` - -2. Set the postgres user password as the **root** user. \(Enter the password twice for confirmation.\) - - ``` - #passwd postgres - ``` - - -#### Creating Data Drives - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- When testing the ultimate performance, you are advised to attach NVMe SSDs with better I/O performance to create PostgreSQL test instances to avoid the impact of disk I/O on the performance test result. This section uses NVMe SSDs as an example. For details, see Step 1 to Step 4. 
->- In a non-performance test, run the following command as the **root** user to create a data directory. Then skip this section. -> \# mkdir /data - -1. Create a file system \(xfs is used as an example as the **root** user. Create the file system based on the site requirements.\). If a file system has been created for a disk, an error will be reported when you run this command. You can use the **-f** parameter to forcibly create a file system. - - ``` - # mkfs.xfs /dev/nvme0n1 - ``` - -2. Create a data directory. - - ``` - # mkdir /data - ``` - -3. Mount disks. - - ``` - # mount -o noatime,nobarrier /dev/nvme0n1 /data - ``` - - -#### Data Directory Authorization - -1. Modify the directory permission as the **root** user. - - ``` - # chown -R postgres:postgres /data/ - ``` - - -### Installing, Running, and Uninstalling PostgreSQL - - -#### Installing PostgreSQL - -1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Install the PostgreSQL server as the **root** user. - - ``` - #dnf install postgresql-server - ``` - -5. Check the installed RPM package. - - ``` - $ rpm -qa | grep postgresql - ``` - - -#### Running PostgreSQL - - - -##### Initializing the Database - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->Perform this step as the postgres user. - -1. Switch to the created PostgreSQL user. - - ``` - # su - postgres - ``` - -2. Initialize the database. In the command, **/usr/bin** is the directory where the **initdb** command is located. - - ``` - $ /usr/bin/initdb -D /data/ - ``` - - -##### Starting the Database - -1. Enable the PostgreSQL database. - - ``` - $ /usr/bin/pg_ctl -D /data/ -l /data/logfile start - ``` - -2. Check whether the PostgreSQL database process is started properly. 
- - ``` - $ ps -ef | grep postgres - ``` - - If the following information is displayed, the PostgreSQL processes have been started. - - ![](./figures/postgres.png) - - -##### Logging In to the Database - -1. Log in to the database. - - ``` - $ /usr/bin/psql -U postgres - ``` - - ![](./figures/login.png) - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >You do not need to enter a password when logging in to the database for the first time. - - -##### Configuring the Database Accounts and Passwords - -1. After login, set the postgres user password. - - ``` - postgres=#alter user postgres with password '123456'; - ``` - - ![](./figures/en-us_image_0230050789.png) - - -##### Exiting the Database - -1. Run **\\q** to exit from the database. - - ``` - postgres=# \q - ``` - - -##### Stopping the Database - -1. Stop the PostgreSQL database. - - ``` - $/usr/bin/pg_ctl -D /data/ -l /data/logfile stop - ``` - - -#### Uninstalling PostgreSQL - -1. Stop the database as the postgres user. - - ``` - $ /usr/bin/pg_ctl -D /data/ -l /data/logfile stop - ``` - -2. Run the **dnf remove postgresql-server** command as the user **root** to uninstall the PostgreSQL database. - - ``` - # dnf remove postgresql-server - ``` - - -### Managing Database Roles - - - -#### Creating a Role - -You can use the **CREATE ROLE** statement or **createuser** command to create a role. The **createuser** command encapsulates the **CREATE ROLE** statement and needs to be executed on the shell GUI instead of the database GUI. - -``` -CREATE ROLE rolename [ [ WITH ] option [ ... ] ]; -``` - -``` -createuser rolename -``` - -In the preceding information: - -- **rolename**: indicates a role name. -- Parameters of the _option_ are as follows: - - **SUPERUSER | NOSUPERUSER**: determines whether a new role is a superuser. If this parameter is not specified, the default value **NOSUPERUSER** is used, indicating that the role is not a superuser. 
- - **CREATEDB | NOCREATEDB**: specifies whether a role can create a database. If this parameter is not specified, the default value **NOCREATEDB** is used, indicating that the role cannot create a database. - - **CREATEROLE | NOCREATEROLE**: determines whether a role can create roles. If this parameter is not specified, the default value **NOCREATEROLE** is used, indicating that the role cannot create roles. - - **INHERIT | NOINHERIT**: determines whether a role inherits the other roles' permissions in the group to which the role belongs. A role with the INHERIT attribute can automatically use any permissions that have been assigned to its direct or indirect group. If this parameter is not specified, the default value **INHERIT** is used. - - **LOGIN | NOLOGIN**: determines whether a role can log in. A role with the LOGIN attribute can be considered as a user. A role without this attribute can be used to manage database permissions but is not a user. If this attribute is not specified, the default value **NOLOGIN** is used. However, if **CREATE USER** instead of **CREATE ROLE** is used to create a role, the LOGIN attribute is used by default. - - **\[ENCRYPTED | UNENCRYPTED\] PASSWORD'password'**: password of a role. The password is valid only for roles with the LOGIN attribute. **ENCRYPTED | UNENCRYPTED**: determines whether to encrypt the password. If this parameter is not specified, the value **ENCRYPTED** is used, that is, the password is encrypted. - - **VALID UNTIL'timestamp'**: specifies the timestamp when the password of a role expires. If this parameter is not specified, the password is permanently valid. - - **IN ROLE rolename1**: lists one or more existing roles. The new role _rolename_ will be added to and become a member of **rolename1**. - - **ROLE rolename2**: lists one or more existing roles. These roles will be automatically added as members of the new role _rolename_. That is, the new role is a user group. 
- - -To run this command, you must have the CREATEROLE permission or is the database superuser. - -##### Example -\#Create a role **roleexample1** who can log in. - -``` -postgres=# CREATE ROLE roleexample1 LOGIN; -``` - -\#Create a role **roleexample2** with the password **123456**. - -``` -postgres=# CREATE ROLE roleexample2 WITH LOGIN PASSWORD '123456'; -``` - -\#Create a role named **roleexample3**. - -``` -[postgres@localhost ~]$ createuser roleexample3 -``` - -#### Viewing Roles - -You can run the **SELECT** statement or the PostgreSQL meta-command **\\du** to view the role. - -``` -SELECT rolename FROM pg_roles; -``` - -``` -\du -``` - -In the preceding command, _rolename_ indicates the role name. - -##### Example -\#View the **roleexample1** role. - -``` -postgres=# SELECT roleexample1 from pg_roles; -``` - -\#View the existing roles. - -``` -postgres=# \du -``` - -#### Modifying a Role - -##### Modifying a Username -Use the **ALTER ROLE** statement to modify an existing role name. - -``` -ALTER ROLE oldrolername RENAME TO newrolename; -``` - -In the preceding information: - -- _oldrolername_: original role name. -- _newrolename_: new role name. - -##### Example of Modifying a User -\#Change the role name **roleexample1** to **roleexapme2**. - -``` -postgres=# ALTER ROLE roleexample1 RENAME TO roleexample2; -``` - -##### Modifying a User Password -Use the **ALTER ROLE** statement to modify the login password of a role. - -``` -ALTER ROLE rolename PASSWORD 'password' -``` - -In the preceding information: - -- _rolename_: indicates a role name. -- _password_: password. - -##### Example of Modifying the Password of a Role -\#Modify the password of **roleexample1** to **456789**. - -``` -postgres=# ALTER ROLE roleexample1 WITH PASSWORD '456789'; -``` - -#### Deleting a Role - -You can use the **DROP ROLE** statement or **dropuser** command to delete a role. 
The **dropuser** command encapsulates the **DROP ROLE** statement and needs to be executed on the shell GUI instead of the database GUI. - -``` -DROP ROLE rolename; -``` - -``` -dropuser rolename -``` - -In the preceding command, _rolename_ indicates the role name. - -##### Example -\#Delete the **userexample1** role. - -``` -postgres=# DROP ROLE userexample1; -``` - -\#Delete the **userexample2** role. - -``` -[postgres@localhost ~]$ dropuser userexample2 -``` - -#### Role Permissions - -You can use the **GRANT** statement to grant permissions to a role. - -Grant the table operation permission to a role. - -``` -GRANT { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER } [,...] | ALL [ PRIVILEGES ] } ON [ TABLE ] tablename [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Grant the sequence operation permission to a role. - -``` -GRANT { { USAGE | SELECT | UPDATE } [,...] | ALL [ PRIVILEGES ] } ON SEQUENCE sequencename [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Grant the database operation permission to a role. - -``` -GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] } ON DATABASE databasename [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Grant the function operation permission to a role. - -``` -GRANT { EXECUTE | ALL [ PRIVILEGES ] } ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Grant the operation permission of the procedural language to a role. - -``` -GRANT { USAGE | ALL [ PRIVILEGES ] } ON LANGUAGE langname [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Grant the schema operation permission to a role. - -``` -GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] } ON SCHEMA schemaname [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] 
[ WITH GRANT OPTION ] -``` - -Grant the tablespace operation permission to a role. - -``` -GRANT { CREATE | ALL [ PRIVILEGES ] } ON TABLESPACE tablespacename [, ...] TO { rolename | GROUP groupname | PUBLIC } [, ...] [ WITH GRANT OPTION ] -``` - -Assign the member relationship of rolename1 to rolename2. - -``` -GRANT rolename1 [, ...] TO rolename2 [, ...] [ WITH ADMIN OPTION ] -``` - -In the preceding information: - -- **SELECT**, **INSERT**, **UPDATE**, **DELETE**, **REFERENCES**, **TRIGGER**, **USAGE**, **CREATE**, **CONNECT**, **TEMPORARY**, **TEMP**, **EXECUTE**, and **ALL \[**_PRIVILEGES_**\]** indicate user operation permissions. **ALL \[**_PRIVILEGES_**\]** indicates all permissions, the _PRIVILEGES_ keyword is optional in PostgreSQL, but it is required in strict SQL statements. -- **ON** clause: specifies the object on which the permission is granted. -- **tablename**: table name. -- **TO** clause: specifies the role to which the permission is granted. -- **rolename**, **rolename1**, and **rolename2**: role names. -- **groupname**: name of a role group. -- **PUBLIC**: indicates that the permission is granted to all roles, including users who may be created later. -- **WITH GRANT OPTION**: indicates that the recipient of a permission can grant the permission to others. This option cannot be assigned to PUBLIC. -- **sequencename**: sequence name. -- **databasename**: database name. -- **funcname \(\[\[argmode\] \[argname\] argtype \[, ...\]\]\)**: function name and its parameters. -- **langname**: procedural language name. -- **schemaname**: schema name. -- **tablespacename**: tablespace name. -- **WITH ADMIN OPTION**: A member can assign the member relationship of a role to other roles and cancel the member relationship of other roles. - -##### Example -\#Grant the CREATE permission on database1 to userexample. - -``` -postgres=# GRANT CREATE ON DATABASE database1 TO userexample; -``` - -\#Grant all permissions on table1 to all users. 
- -``` -postgres=# GRANT ALL PRIVILEGES ON TABLE table1 TO PUBLIC; -``` - -#### Deleting User Permissions - -You can use the **REVOKE** statement to revoke the permissions previously granted to one or more roles. - -Revoke the table operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER } [,...] | ALL [ PRIVILEGES ] } ON [ TABLE ] tablename [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] -``` - -Revoke the sequence operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { { USAGE | SELECT | UPDATE } [,...] | ALL [ PRIVILEGES ] } ON SEQUENCE sequencename [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the database operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] } ON DATABASE databasename [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the function operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { EXECUTE | ALL [ PRIVILEGES ] } ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the procedural language operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON LANGUAGE langname [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the schema operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] } ON SCHEMA schemaname [, ...] FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the tablespace operation permission from a role. - -``` -REVOKE [ GRANT OPTION FOR ] { CREATE | ALL [ PRIVILEGES ] } ON TABLESPACE tablespacename [, ...] 
FROM { rolename | GROUP groupname | PUBLIC } [, ...] [ CASCADE | RESTRICT ] -``` - -Revoke the member relationship of rolename1 from rolename2. - -``` -REVOKE [ ADMIN OPTION FOR ] rolename1 [, ...] FROM rolename2 [, ...] [ CASCADE | RESTRICT ] -``` - -In the preceding information: - -- **GRANT OPTION FOR**: The permission cannot be granted to others, but permission itself is not revoked. -- **SELECT**, **INSERT**, **UPDATE**, **DELETE**, **REFERENCES**, **TRIGGER**, **USAGE**, **CREATE**, **CONNECT**, **TEMPORARY**, **TEMP**, **EXECUTE**, and **ALL \[**_PRIVILEGES_**\]** indicate user operation permissions. **ALL \[**_PRIVILEGES_**\]** indicates all permissions, the _PRIVILEGES_ keyword is optional in PostgreSQL, but it is required in strict SQL statements. -- **ON** clause: specifies the object on which the permission is revoked. -- _tablename_: table name. -- **FROM** clause: specifies the role whose permission is revoked. -- _rolename_, _rolename1_, and _rolename2_: role names. -- _groupname_: name of a role group. -- **PUBLIC**: revokes the implicitly defined groups that have all roles. However, this does not mean that all roles lose the permissions. The permissions directly obtained and the permissions obtained through a group are still valid. -- _sequencename_: sequence name. -- **CASCADE**: revokes all dependent permissions. -- **RESTRICT**: does not revoke all dependent permissions. -- _databasename_: database name. -- **funcname \(**_\[\[argmode\] \[argname\] argtype \[, ...\]\]_**\)**: function name and its parameters. -- _langname_: procedural language name. -- _schemaname_: schema name. -- _tablespacename_: tablespace name. -- **ADMIN OPTION FOR**: The transferred authorization is not automatically revoked. - -##### Example -\#Grant the CREATE permission on database1 to userexample. - -``` -postgres=# GRANT CREATE ON DATABASE database1 TO userexample; -``` - -\#Grant all permissions on table1 to all users. 
- -``` -postgres=# GRANT ALL PRIVILEGES ON TABLE table1 TO PUBLIC; -``` - -### Managing Databases - - - -#### Creating a Database - -You can use the **CREATE DATABASE** statement or the **createdb** command to create a role. The **createdb** command encapsulates the **CREATE DATABASE** statement and needs to be executed on the shell GUI instead of the database GUI. - -``` -CREATE DATABASE databasename; -``` - -``` -createdb databasename -``` - -In the preceding command, **databasename** indicates the database name. - -To use this command, you must have the CREATEDB permission. - -##### Example -\# Create a database named **database1**. - -``` -postgres=# CREATE DATABASE database1; -``` - -#### Selecting a Database - -Use the **\\c** statement to select a database. - -``` -\c databasename; -``` - -In the preceding command, **databasename** indicates the database name. - -##### Example -\#Select the **databaseexample** database. - -``` -postgres=# \c databaseexample; -``` - -#### Viewing a Database - -Use the **\\l** statement to view the database. - -``` -\l; -``` - -##### Example -\#View all databases. - -``` -postgres=# \l; -``` - -#### Deleting a Database - -You can run the **DROP DATABASE** statement or **dropdb** command to delete a database. The **dropdb** command encapsulates the **DROP DATABASE** statement and needs to be executed on the shell GUI instead of the database GUI. - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->Exercise caution when deleting a database. Once a database is deleted, all tables and data in the database will be deleted. - -``` -DROP DATABASE databasename; -``` - -``` -dropdb databasename -``` - -In the preceding command, **databasename** indicates the database name. - -The **DROP DATABASE** statement deletes the system directory items of the database and the file directories that contain data. - -**DROP DATABASE** can be executed only by the super administrator or database owner. 
- -##### Example -\#Delete the **databaseexample** database. - -``` -postgres=# DROP DATABASE databaseexample; -``` - -#### Backing Up a Database - -Run the **pg\_dump** command to back up the database and dump the database to a script file or another archive file. - -``` -pg_dump [option]... [databasename] > outfile -``` - -In the preceding information: - -- _databasename_: database name. If this parameter is not specified, the environment variable **PGDATABASE** is used. If that environment variable is not specified, use the username that initiates the connection. -- _outfile_: database backup file. -- _option_: parameter option of the **pg\_dump** command. Multiple parameters can be separated by spaces. The common parameters of the **pg\_dump** command are as follows: - - **-f, \-\-file**= _filename_: specified output file. If this parameter is ignored, the standard output is used. - - **-d, \-\-dbname**= _databasename_: database to be dumped. - - **-h, \-\-host**= _hostname_: specifies the hostname. - - **-p, \-\-port**= _portnumber_: port number. - - **-U, \-\-username**= _username_: username of the connection. - - **-W, \-\-password**: forces PostgreSQL to prompt for a password before connecting to a database. - - -##### Example -\#Back up the database1 database of user **postgres** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1.sql** file. - -``` -[postgres@localhost ~]$ pg_dump -h 192.168.202.144 -p 3306 -U postgres -W database1 > db1.sql -``` - -#### Restoring a Database - -Run the **psql** command to restore the database. - -``` -psql [option]... [databasename [username]] < infile -``` - -In the preceding information: - -- _databasename_: database name. If this parameter is not specified, the environment variable **PGDATABASE** is used. If that environment variable is not specified, use the username that initiates the connection. -- _username_: name of a user. 
-- _infile_: **outfile** parameter in the **pg\_dump** command. -- _option_: parameter option of the **psql** command. Multiple parameters can be separated by spaces. The common parameters of the **psql** command are as follows: - - **-f, \-\-file**= _filename_: specified output file. If this parameter is ignored, the standard output is used. - - **-d, \-\-dbname**= _databasename_: database to be dumped. - - **-h, \-\-host**= _hostname_: specifies the hostname. - - **-p, \-\-port**= _portnumber_: port number. - - **-U, \-\-username**= _username_: username of the connection. - - **-W, \-\-password**: forces PostgreSQL to prompt for a password before connecting to a database. - - -The **psql** command cannot be used to automatically create the **databasename** database. Therefore, you need to create the **databasename** database before running the **psql** command to restore the database. - -##### Example -\#Import the **db1.sql** script file to the newdb database of the postgres user on the host **192.168.202.144** through port **3306**. - -``` -[postgres@localhost ~]$ createdb newdb -[postgres@localhost ~]$ psql -h 192.168.202.144 -p 3306 -U postgres -W -d newdb < db1.sql -``` - -## MariaDB Server - - -### Software Description - -The MariaDB database management system is a branch of MySQL and is maintained by the open-source community. The MariaDB database management system uses the General Public License \(GPL\). MariaDB is designed to be fully compatible with MySQL, including APIs and command lines, so that it can easily replace MySQL. MariaDB also provides many new features. - -[Figure 2](#fig13492418164520) shows the MariaDB architecture. - -**Figure 2** MariaDB logical architecture -![](./figures/mariadb-logical-architecture.png "mariadb-logical-architecture") - -When MariaDB receives a SQL statement, the execution process is as follows: - -1. When a client connects to MariaDB, the hostname, username, and password of the client are authenticated. 
The authentication function can be implemented as a plug-in. -2. If the login is successful, the client sends SQL commands to the server. The parser parses the SQL statements. -3. The server checks whether the client has the permission to obtain the required resources. -4. If the query has been stored in the query cache, the result is returned immediately. -5. The optimizer will find the fastest execution policy or plan. That is, the optimizer can determine which tables will be read, which indexes will be accessed, and which temporary tables will be used. A good policy can reduce a large number of disk access and sorting operations. -6. Storage engines read and write data and index files. Caches are used to accelerate these operations. Other features such as transactions and foreign keys are processed at the storage engine layer. - -Storage engines manage and control data at the physical layer. They manage data files, data, indexes, and caches, making data management and reading more efficient. Each table has a .frm file that contains table definitions. - -Each storage engine manages and stores data in different ways, and supports different features and performance. For example: - -- MyISAM: suitable for environments with more reads and fewer writes. It does not support transactions and supports full-text indexes. -- InnoDB: supports transactions, row locks, and foreign keys. -- MEMORY: stores data in the memory. -- CSV: stores data in CSV format. - -### Configuring the Environment - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The following environment configuration is for reference only. Configure the environment based on the site requirements. - - - -#### Disabling the Firewall and Automatic Startup - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->It is recommended that firewall be disabled in the test environment to prevent network impact. Configure the firewall based on actual requirements. - -1. 
Stop the firewall service as the **root** user. - - ``` - # systemctl stop firewalld - ``` - -2. Disable the firewall service as the **root** user. - - ``` - # systemctl disable firewalld - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The automatic startup is automatically disabled as the firewall is disabled. - - -#### Disabling SELinux - -1. Modify the configuration file as the **root** user. - - ``` - # sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux - ``` - - -#### Creating a User Group and a User - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->In the server environment, independent users are assigned to each process to implement permission isolation for security purposes. The user group and user are created for the OS, not for the database. - -1. Create a MySQL user or user group as the **root** user. - - ``` - # groupadd mysql - ``` - - ``` - # useradd -g mysql mysql - ``` - -2. Set the user password as the **root** user. - - ``` - # passwd mysql - ``` - - Enter the password twice for confirmation. - - -#### Creating Data Drives - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- If a performance test needs to be performed, an independent drive is required for the data directory. You need to format and mount the drive. For details, see Method 1 or Method 2. ->- In a non-performance test, run the following command as the **root** user to create a data directory. Then skip this section. -> \# mkdir /data - -##### Method 1: Using fdisk for Drive Management as the **root** user -1. Create a partition, for example, **/dev/sdb**. - - ``` - # fdisk /dev/sdb - ``` - -2. Enter **n** and press **Enter**. -3. Enter **p** and press **Enter**. -4. Enter **1** and press **Enter**. -5. Retain the default settings and press **Enter**. -6. Retain the default settings and press **Enter**. -7. Enter **w** and press **Enter**. -8. Create a file system, for example, **xfs**. - - ``` - # mkfs.xfs /dev/sdb1 - ``` - -9. 
Mount the partition to **/data** for the OS. - - ``` - # mkdir /data - ``` - - ``` - # mount /dev/sdb1 /data - ``` - -10. Run the **vi /etc/fstab** command and edit the **/etc/fstab** file to enable the data drive to be automatically mounted after the system is restarted. For example, add the content in the last line, as shown in the following figure. - - In the last line, **/dev/nvme0n1p1** is only an example. - - ![](./figures/creat_datadisk1.png) - - -##### Method 2: Using LVM for Drive Management as the **root** user ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Install the LVM2 package in the image as follows: ->1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). If the repository has been configured, skip this step. ->2. Install LVM2. -> **\# yum install lvm2** - -1. Create a physical volume, for example, **sdb**. - - ``` - # pvcreate /dev/sdb - ``` - -2. Create a physical volume group, for example, **datavg**. - - ``` - # vgcreate datavg /dev/sdb - ``` - -3. Create a logical volume, for example, **datalv** of 600 GB. - - ``` - # lvcreate -L 600G -n datalv datavg - ``` - -4. Create a file system. - - ``` - # mkfs.xfs /dev/datavg/datalv - ``` - -5. Create a data directory and mount it. - - ``` - # mkdir /data - ``` - - ``` - # mount /dev/datavg/datalv /data - ``` - -6. Run the **vi /etc/fstab** command and edit the **/etc/fstab** file to enable the data drive to be automatically mounted after the system is restarted. For example, add the content in the last line, as shown in the following figure. - - In the last line, **/dev/datavg/datalv** is only an example. - - ![](./figures/d1376b2a-d036-41c4-b852-e8368f363b5e.png) - - -#### Creating a Database Directory and Granting Permissions - -1. In the created data directory **/data**, create directories for processes and grant permissions to the MySQL group or user created as the **root** user. 
- - ``` - # mkdir -p /data/mariadb - # cd /data/mariadb - # mkdir data tmp run log - # chown -R mysql:mysql /data - ``` - - -### Installing, Running, and Uninstalling MariaDB Server - - -#### Installing MariaDB - -1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Install the MariaDB server. - - ``` - # dnf install mariadb-server - ``` - -5. Check the installed RPM package. - - ``` - $ rpm -qa | grep mariadb - ``` - - -#### Running MariaDB Server - -1. Start the MariaDB server as the **root** user. - - ``` - # systemctl start mariadb - ``` - -2. Initialize the database as the **root** user. - - ``` - # /usr/bin/mysql_secure_installation - ``` - - During the command execution, you need to enter the password of the database user **root**. If no password is set, press **Enter**. Then, set the password as prompted. - -3. Log in to the database. - - ``` - $ mysql -u root -p - ``` - - After the command is executed, the system prompts you to enter the password. The password is the one set in [2](#li197143190587). - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Run the **\\q** or **exit** command to exit the database. - - -#### Uninstalling MariaDB - -1. Stop the database process as the **root** user. - - ``` - $ ps -ef | grep mysql - # kill -9 PID - ``` - -2. Run the **dnf remove mariadb-server** command as the **root** user to uninstall MariaDB. - - ``` - # dnf remove mariadb-server - ``` - - -### Managing Database Users - - - -#### Creating Users - -Run the **CREATE USER** statement to create one or more users and set corresponding passwords. - -``` -CREATE USER 'username'@'hostname' IDENTIFIED BY 'password'; -``` - -In the preceding information: - -- _username_: name of a user. -- _host_: hostname, that is, the name of the host where the user connects to the database. 
As a local user, you can set the parameter to **localhost**. If the host name is not specified during user creation, the host name is **%** by default, indicating a group of hosts. -- _password_: password for logging in to the server. The password can be null. If the password is null, the user can log in to the server without entering the password. This method, however, is not recommended because it provides low security. - -To use the **CREATE USER** statement, you must have the INSERT permission on the database or the global CREATE USER permission. - -After a user account is created using the **CREATE USER** statement, a record is added to the user table in the database. If the account to be created exists, an error will occur during statement execution. - -A new user has few permissions and can perform only operations that do not require permissions. For example, a user can run the **SHOW** statement to query the list of all storage engines and character sets. - -##### Example -\#Create a local user whose password is 123456 and username is userexample1. - -``` -> CREATE USER 'userexample1'@'localhost' IDENTIFIED BY '123456'; -``` - -\#Create a user whose password is 123456, username is userexample2, and hostname is 192.168.1.100. - -``` -> CREATE USER 'userexample2'@'192.168.1.100' IDENTIFIED BY '123456'; -``` - -#### Viewing Users - -Run the **SHOW GRANTS** or **SELECT** statement to view one or more users. - -View a specific user: - -``` -SHOW GRANTS [FOR 'username'@'hostname']; -``` - -``` -SELECT USER,HOST,PASSWORD FROM mysql.user WHERE USER='username'; -``` - -View all users: - -``` -SELECT USER,HOST,PASSWORD FROM mysql.user; -``` - -In the preceding information: - -- _username_: name of a user. -- _hostname_: host name. - -##### Example -\#View the user userexample1. - -``` -> SHOW GRANTS FOR 'userexample1'@'localhost'; -``` - -\#View all users in the MySQL database. 
- -``` -> SELECT USER,HOST,PASSWORD FROM mysql.user; -``` - -#### Modifying Users - -##### Modifying a Username -Run the **RENAME USER** statement to change one or more existing usernames. - -``` -RENAME USER 'oldusername'@'hostname' TO 'newusername'@'hostname'; -``` - -In the preceding information: - -- _oldusername_: original username. -- _newusername_: new username. -- _hostname_: host name. - -The **RENAME USER** statement is used to rename an existing account. If the original account does not exist in the system or the new account exists, an error will occur when the statement is executed. - -To use the **RENAME USER** statement, you must have the UPDATE permission on the database or the global CREATE USER permission. - -##### Example of Modifying a User -\# Change the username **userexample1** to **userexample2** and change the hostname to **localhost**. - -``` -> RENAME USER 'userexample1'@'localhost' TO 'userexample2'@'localhost'; -``` - -##### Modifying a User Password -Use the **SET PASSWORD** statement to modify the login password of a user. - -``` -SET PASSWORD FOR 'username'@'hostname' = PASSWORD('newpassword'); -``` - -In the preceding information: - -- **FOR 'username'@'hostname'**: specifies the username and hostname whose password is to be changed. This parameter is optional. -- **PASSWORD\('newpassword'\)**: indicates that the **PASSWORD\(\)** function is used to set a new password. That is, the new password must be transferred to the **PASSWORD\(\)** function for encryption. - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->The **PASSWORD\(\)** function is a unidirectional encryption function. Once encrypted, the original plaintext cannot be decrypted. - -If the **FOR** clause is not added to the **SET PASSWORD** statement, the password of the current user is changed. 
- -The **FOR** clause must be given in the format of **'**_username_**'@'**_hostname_**'**, where _username_ indicates the username of the account and _hostname_ indicates the hostname of the account. - -The account whose password is to be changed must exist in the system. Otherwise, an error occurs when the statement is executed. - -##### Example of Changing a User Password -\#Change the password of user **userexample** whose hostname is **localhost** to **0123456**. - -``` -> SET PASSWORD FOR 'userexample'@'localhost' = PASSWORD('0123456'); -``` - -#### Deleting Users - -Use the **DROP USER** statement to delete one or more user accounts and related permissions. - -``` -DROP USER 'username1'@'hostname1' [,'username2'@'hostname2']...; -``` - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->The deletion of users does not affect the tables, indexes, or other database objects that they have created, because the database does not record the accounts that have created these objects. - -The **DROP USER** statement can be used to delete one or more database accounts and their original permissions. - -To use the **DROP USER** statement, you must have the DELETE permission on the database or the global CREATE USER permission. - -In the **DROP USER** statement, if the hostname of an account is not specified, the hostname is **%** by default. - -##### Example -\#Delete the local user **userexample**. - -``` -> DROP USER 'userexample'@'localhost'; -``` - -#### Granting Permissions to a User - -Run the **GRANT** statement to grant permissions to a new user. - -``` -GRANT privileges ON databasename.tablename TO 'username'@'hostname'; -``` - -In the preceding information: - -- **ON** clause: specifies the object and its level on which the permission is granted. -- **privileges**: indicates the operation permissions of a user, such as **SELECT**, INSERT, and **UPDATE**. To grant all permissions to a user, use **ALL**. -- _databasename_: database name. 
-- _tablename_: table name. -- **TO** clause: sets the user password and specifies the user to whom the permission is granted. -- _username_: name of a user. -- _hostname_: host name. - -To grant the user the permission to operate all databases and tables, use asterisks \(\*\), for example, **\*.\***. - -If you specify a password for an existing user in the **TO** clause, the new password will overwrite the original password. - -If the permission is granted to a non-existent user, a **CREATE USER** statement is automatically executed to create the user, but the password must be specified for the user. - -##### Example -\#Grant the SELECT and INSERT permissions to local user userexample. - -``` -> GRANT SELECT,INSERT ON *.* TO 'userexample'@'localhost'; -``` - -#### Deleting User Permissions - -Run the **REVOKE** statement to delete the permissions of a user, but the user will not be deleted. - -``` -REVOKE privilege ON databasename.tablename FROM 'username'@'hostname'; -``` - -The parameters in the **REVOKE** statement are the same as those in the **GRANT** statement. - -To use the **REVOKE** statement, you must have the global CREATE USER or UPDATE permission for the database. - -##### Example -\#Delete the INSERT permission of local user userexample. - -``` -> REVOKE INSERT ON *.* FROM 'userexample'@'localhost'; -``` - -### Managing Databases - - - -#### Creating a Database - -Run the **CREATE DATABASE** statement to create a database. - -``` -CREATE DATABASE databasename; -``` - -In the preceding command, _databasename_ can be replaced with the database name, which is case insensitive. - -##### Example -\#Create a database named **databaseexample**. - -``` -> CREATE DATABASE databaseexample; -``` - -#### Viewing a Database - -Run the **SHOW DATABASES** statement to view a database. - -``` -SHOW DATABASES; -``` - -##### Example -\#View all databases. 
- -``` -> SHOW DATABASES; -``` - -#### Selecting a Database - -Generally, you need to select a target database before creating or querying a table. Use the **USE** statement to select a database. - -``` -USE databasename; -``` - -In the preceding command, **databasename** indicates the database name. - -##### Example -\#Select the **databaseexample** database. - -``` -> USE databaseexample; -``` - -#### Deleting a Database - -You can run the **DROP DATABASE** statement to delete a database. - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->Exercise caution when deleting a database. Once a database is deleted, all tables and data in the database will be deleted. - -``` -DROP DATABASE databasename; -``` - -In the preceding command, **databasename** indicates the database name. - -The **DROP DATABASE** command is used to delete an existing database. After this command is executed, all tables in the database are deleted, but the user permissions of the database are not automatically deleted. - -To use **DROP DATABASE**, you need the **DROP** permission on the database. - -**DROP SCHEMA** is a synonym of **DROP DATABASE**. - -##### Example -\#Delete the **databaseexample** database. - -``` -> DROP DATABASE databaseexample; -``` - -#### Backing Up a Database - -Run the **mysqldump** command as the **root** user to back up the database. - -Back up one or more tables. - -``` -mysqldump [options] databasename [tablename ...] > outfile -``` - -Back up one or more databases: - -``` -mysqldump [options] --databases databasename ... > outfile -``` - -Back up all databases: - -``` -mysqldump [options] --all-databases > outfile -``` - -In the preceding information: - -- _databasename_: database name. -- _tablename_: name of a data table. -- _outfile_: database backup file. -- _options_: parameter option of the **mysqldump** command. Multiple parameters can be separated by spaces. 
The common parameters of the **mysqldump** command are as follows: - - **-u, \-\-user**= _username_: specifies the username. - - **-p, \-\-password**\[= _password_\]: specifies the password. - - **-P, \-\-port**= _portnumber_: specifies the port number. - - **-h, \-\-host**= _hostname_: specifies the hostname. - - **-r, \-\-result-file**= _filename_: saves the export result to a specified file, which is equivalent to **\>**. - - **-t**: backs up data only. - - **-d**: backs up the table structure only. - - -##### Example -\#Back up all the databases of the user **root** on the host **192.168.202.144** through port **3306** to the **alldb.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 --all-databases > alldb.sql -``` - -\#Back up the db1 database of the user **root** on the host **192.168.202.144** through port **3306** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 --databases db1 > db1.sql -``` - -\#Back up the tb1 table of the db1 database of the user **root** on the host **192.168.202.144** through port **3306** to the **db1tb1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 db1 tb1 > db1tb1.sql -``` - -\#Back up only the table structure of the db1 database of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 -d db1 > db1.sql -``` - -\#Back up only the data of the db1 database of the user **root** on the host **192.168.202.144** through port **3306** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 -t db1 > db1.sql -``` - -#### Restoring a Database - -Run the **mysql** command as the **root** user to restore the database. - -Restore one or more tables: - -``` -mysql -h hostname -P portnumber -u username -ppassword databasename < infile -``` - -In the preceding information: - -- _hostname_: host name. 
-- _portnumber_: port number. -- _username_: name of a user. -- _password_: password. -- _databasename_: database name. -- _infile_: **outfile** parameter in the **mysqldump** command. - -##### Example -\#Restore a database. - -``` -# mysql -h 192.168.202.144 -P 3306 -uroot -p123456 -t db1 < db1.sql -``` - -## MySQL Server - - - -### Software Description - -MySQL is a relational database management system \(RDBMS\) developed by the Swedish company MySQL AB, which was bought by Sun Microsystems \(now Oracle\). It is one of the most popular Relational Database Management Systems \(RDBMSs\) in the industry, especially for web applications. - -A relational database stores data in different tables instead of in a large data warehouse to improve efficiency and flexibility. - -The Structured Query Language \(SQL\) used by MySQL is the most common standard language for accessing databases. MySQL uses dual-licensing distribution and is available in two editions: Community Edition and Commercial Edition. MySQL is optimal for small or medium-sized websites because of its small size, fast speed, low cost, and especially the open source code. - -### Configuring the Environment - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The following environment configuration is for reference only. Configure the environment based on the site requirements. - - - -#### Disabling the Firewall and Automatic Startup - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->It is recommended that firewall be disabled in the test environment to prevent network impact. Configure the firewall based on actual requirements. - -1. Stop the firewall service as the **root** user. - - ``` - # systemctl stop firewalld - ``` - -2. Disable the firewall service as the **root** user. - - ``` - # systemctl disable firewalld - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The automatic startup is automatically disabled as the firewall is disabled. - - -#### Disabling SELinux - -1. 
Modify the configuration file as the **root** user. - - ``` - # sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux - ``` - - -#### Creating a User Group and a User - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->In the server environment, independent users are assigned to each process to implement permission isolation for security purposes. The user group and user are created for the OS, not for the database. - -1. Create a MySQL user or user group as the **root** user. - - ``` - # groupadd mysql - ``` - - ``` - # useradd -g mysql mysql - ``` - -2. Set the user password as the **root** user. - - ``` - # passwd mysql - ``` - - Enter the password twice for confirmation. - - -#### Creating Data Drives - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- If a performance test needs to be performed, an independent drive is required for the data directory. You need to format and mount the drive. For details, see Method 1 or Method 2. ->- In a non-performance test, run the following command as the **root** user to create a data directory. Then skip this section. -> \# mkdir /data - -##### Method 1: Using fdisk for Drive Management as the **root** user -1. Create a partition, for example, **/dev/sdb**. - - ``` - # fdisk /dev/sdb - ``` - -2. Enter **n** and press **Enter**. -3. Enter **p** and press **Enter**. -4. Enter **1** and press **Enter**. -5. Retain the default settings and press **Enter**. -6. Retain the default settings and press **Enter**. -7. Enter **w** and press **Enter**. -8. Create a file system, for example, **xfs**. - - ``` - # mkfs.xfs /dev/sdb1 - ``` - -9. Mount the partition to **/data** for the OS. - - ``` - # mkdir /data - ``` - - ``` - # mount /dev/sdb1 /data - ``` - -10. Run the **vi /etc/fstab** command and edit the **/etc/fstab** file to enable the data drive to be automatically mounted after the system is restarted. For example, add the content in the last line, as shown in the following figure. 
- - In the last line, **/dev/nvme0n1p1** is only an example. - - ![](./figures/creat_datadisk.png) - - -##### Method 2: Using LVM for Drive Management as the **root** user ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Install the LVM2 package in the image as follows: ->1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). If the repository has been configured, skip this step. ->2. Install LVM2. -> **\# yum install lvm2** - -1. Create a PV, for example, **sdb**. - - ``` - # pvcreate /dev/sdb - ``` - -2. Create a physical VG, for example, **datavg**. - - ``` - # vgcreate datavg /dev/sdb - ``` - -3. Create an LV, for example, **datalv** of 600 GB. - - ``` - # lvcreate -L 600G -n datalv datavg - ``` - -4. Create a file system. - - ``` - # mkfs.xfs /dev/datavg/datalv - ``` - -5. Create a data directory and mount it. - - ``` - # mkdir /data - ``` - - ``` - #mount /dev/datavg/datalv /data - ``` - -6. Run the **vi /etc/fstab** command and edit the **/etc/fstab** file to enable the data drive to be automatically mounted after the system is restarted. For example, add the content in the last line, as shown in the following figure. - - In the last line, **/dev/datavg/datalv** is only an example. - - ![](./figures/d1376b2a-d036-41c4-b852-e8368f363b5e-1.png) - - -#### Creating a Database Directory and Granting Permissions - -1. In the created data directory **/data**, create directories for processes and grant permissions to the MySQL group or user created as the **root** user. - - ``` - # mkdir -p /data/mysql - # cd /data/mysql - # mkdir data tmp run log - # chown -R mysql:mysql /data - ``` - - -### Installing, Running, and Uninstalling MySQL - - - -#### Installing MySQL - -1. Configure the local yum source. For details, see [Configuring the Repo Server](./configuring-the-repo-server.html). -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. 
Install the MySQL server as the **root** user. - - ``` - # dnf install mysql-server - ``` - -5. Check the installed RPM package. - - ``` - $ rpm -qa | grep mysql-server - ``` - - -#### Running MySQL - -1. Modify the configuration file. - 1. Create the **my.cnf** file as the **root** user and change the file paths \(including the software installation path **basedir** and data path **datadir**\) based on the actual situation. - - ``` - # vi /etc/my.cnf - ``` - - Edit the **my.cnf** file as follows: - - ``` - [mysqld_safe] - log-error=/data/mysql/log/mysql.log - pid-file=/data/mysql/run/mysqld.pid - [mysqldump] - quick - [mysql] - no-auto-rehash - [client] - default-character-set=utf8 - [mysqld] - basedir=/usr/local/mysql - socket=/data/mysql/run/mysql.sock - tmpdir=/data/mysql/tmp - datadir=/data/mysql/data - default_authentication_plugin=mysql_native_password - port=3306 - user=mysql - ``` - - 2. Ensure that the **my.cnf** file is correctly modified. - - ``` - $ cat /etc/my.cnf - ``` - - ![](./figures/en-us_image_0231563132.png) - - >![](./public_sys-resources/icon-caution.gif) **CAUTION:** - >In the configuration file, **basedir** specifies the software installation path. Change it based on actual situation. - - 3. Change the group and user of the **/etc/my.cnf** file to **mysql:mysql** as the **root** user. - - ``` - # chown mysql:mysql /etc/my.cnf - ``` - -2. Configure environment variables. - 1. Add the path of the MySQL binary files to the **PATH** parameter as the **root** user. - - ``` - # echo export PATH=$PATH:/usr/local/mysql/bin >> /etc/profile - ``` - - >![](./public_sys-resources/icon-caution.gif) **CAUTION:** - >In the command, **/usr/local/mysql/bin** is the absolute path of the **bin** files in the MySQL software installation directory. Change it based on actual situation. - - 2. Run the following command as the **root** user to make the environment variables take effect: - - ``` - # source /etc/profile - ``` - -3. 
Initialize the database as the **root** user. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The second line from the bottom contains the initial password, which will be used when you log in to the database. - - ``` - # mysqld --defaults-file=/etc/my.cnf --initialize - 2020-03-18T03:27:13.702385Z 0 [System] [MY-013169] [Server] /usr/local/mysql/bin/mysqld (mysqld 8.0.17) initializing of server in progress as process 34014 - 2020-03-18T03:27:24.112453Z 5 [Note] [MY-010454] [Server] A temporary password is generated for root@localhost: iNat=)#V2tZu - 2020-03-18T03:27:28.576003Z 0 [System] [MY-013170] [Server] /usr/local/mysql/bin/mysqld (mysqld 8.0.17) initializing of server has completed - ``` - - If the command output contains "initializing of server has completed", the database has been initialized. In the command output, "iNat=\)\#V2tZu" in "A temporary password is generated for root@localhost: iNat=\)\#V2tZu" is the initial password. - -4. Start the database. - - >![](./public_sys-resources/icon-caution.gif) **CAUTION:** - >Start MySQL as user **mysql** if it is the first time to start the database service. If you start MySQL as user **root**, a message will be displayed indicating that the **mysql.log** file is missing. If you start MySQL as user **mysql**, the **mysql.log** file will be generated in the **/data/mysql/log** directory. No error will be displayed if you start the database as user **root** again. - - 1. Modify the file permission as the **root** user. - - ``` - # chmod 777 /usr/local/mysql/support-files/mysql.server - ``` - - 2. Start MySQL as the **root** user. - - ``` - # cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql - # chkconfig mysql on - ``` - - Start MySQL as user **mysql**. - - ``` - # su - mysql - $ service mysql start - ``` - -5. Log in to the database. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- Enter the initial password generated during database initialization \([3](#li15634560582)\). 
- >- If MySQL is installed by using an RPM package obtained from the official website, the **mysqld** file is located in the **/usr/sbin** directory. Ensure that the directory specified in the command is correct. - - ``` - $ /usr/local/mysql/bin/mysql -uroot -p -S /data/mysql/run/mysql.sock - ``` - - ![](./figures/en-us_image_0231563134.png) - -6. Configure the database accounts and passwords. - 1. After logging in to the database, change the password of user **root** for logging in to the database. - - ``` - mysql>alter user 'root'@'localhost' identified by "123456"; - ``` - - 2. Create a user **root** for all the other hosts in the domain. - - ``` - mysql>create user 'root'@'%' identified by '123456'; - ``` - - 3. Grant permissions to the user **root**. - - ``` - mysql>grant all privileges on *.* to 'root'@'%'; - mysql>flush privileges; - ``` - - ![](./figures/en-us_image_0231563135.png) - -7. Exit the database. - - Run the **\\q** or **exit** command to exit the database. - - ``` - mysql>exit - ``` - - ![](./figures/en-us_image_0231563136.png) - - -#### Uninstalling MySQL - -1. Stop the database process as the **root** user. - - ``` - $ ps -ef | grep mysql - # kill -9 PID - ``` - -2. Run the **dnf remove mysql** command as the **root** user to uninstall MySQL. - - ``` - # dnf remove mysql - ``` - - -### Managing Database Users - - - -#### Creating Users - -Run the **CREATE USER** statement to create one or more users and set corresponding passwords. - -``` -CREATE USER 'username'@'hostname' IDENTIFIED BY 'password'; -``` - -In the preceding information: - -- _username_: name of a user. -- _host_: hostname, that is, the name of the host where the user connects to the database. As a local user, you can set the parameter to **localhost**. If the host name is not specified during user creation, the host name is **%** by default, indicating a group of hosts. -- _password_: password for logging in to the server. The password can be null. 
If the password is null, the user can log in to the server without entering the password. This method, however, is not recommended because it provides low security. - -To use the **CREATE USER** statement, you must have the **INSERT** permission on the database or the global **CREATE USER** permission. - -After a user account is created using the **CREATE USER** statement, a record is added to the user table in the database. If the account to be created exists, an error will occur during statement execution. - -A new user has few permissions and can perform only operations that do not require permissions. For example, a user can run the **SHOW** statement to query the list of all storage engines and character sets. - -##### Example -\#Create a local user whose password is **123456** and username is **userexample1**. - -``` -> CREATE USER 'userexample1'@'localhost' IDENTIFIED BY '123456'; -``` - -\#Create a user whose password is **123456**, username is **userexample2**, and hostname is **192.168.1.100**. - -``` -> CREATE USER 'userexample2'@'192.168.1.100' IDENTIFIED BY '123456'; -``` - -#### Viewing Users - -Run the **SHOW GRANTS** or **SELECT** statement to view one or more users. - -View a specific user: - -``` -SHOW GRANTS [FOR 'username'@'hostname']; -``` - -``` -SELECT USER,HOST,PASSWORD FROM mysql.user WHERE USER='username'; -``` - -View all users: - -``` -SELECT USER,HOST FROM mysql.user; -``` - -In the preceding information: - -- _username_: name of a user. -- _hostname_: host name. - -##### Example -\#View the user **userexample1**. - -``` -> SHOW GRANTS FOR 'userexample1'@'localhost'; -``` - -\#View all users in the MySQL database. - -``` -> SELECT USER,HOST FROM mysql.user; -``` - -#### Modifying Users - -##### Modifying a Username -Run the **RENAME USER** statement to change one or more existing usernames. 
- -``` -RENAME USER 'oldusername'@'hostname' TO 'newusername'@'hostname'; -``` - -In the preceding information: - -- _oldusername_: original username. -- _newusername_: new username. -- _hostname_: host name. - -The **RENAME USER** statement is used to rename an existing account. If the original account does not exist in the system or the new account exists, an error will occur when the statement is executed. - -To use the **RENAME USER** statement, you must have the **UPDATE** permission on the database or the global **CREATE USER** permission. - -##### Example of Modifying a User -\# Change the username **userexample1** to **userexample2** and change the hostname to **locahost**. - -``` -> RENAME USER 'userexample1'@'localhost' TO 'userexample2'@'localhost'; -``` - -##### Modifying a User Password -Use the **SET PASSWORD** statement to modify the login password of a user. - -``` -SET PASSWORD FOR 'username'@'hostname' = 'newpassword'; -``` - -In the preceding information: - -- **FOR'**_username_**'@'**_hostname_**'**: specifies the username and hostname whose password is to be changed. This parameter is optional. -- _newpassword_: new password. - -If the **FOR** clause is not added to the **SET PASSWORD** statement, the password of the current user is changed. - -The **FOR** clause must be given in the format of **'**_username_**'@'**_hostname_**'**, where _username_ indicates the username of the account and _hostname_ indicates the hostname of the account. - -The account whose password is to be changed must exist in the system. Otherwise, an error occurs when the statement is executed. - -##### Example of Changing a User Password -\#Change the password of user **userexample** whose hostname is **locahost** to **0123456**. - -``` -> SET PASSWORD FOR 'userexample'@'localhost' = '0123456'; -``` - -#### Deleting Users - -Use the **DROP USER** statement to delete one or more user accounts and related permissions. 
- -``` -DROP USER 'username1'@'hostname1' [,'username2'@'hostname2']...; -``` - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->The deletion of users does not affect the tables, indexes, or other database objects that they have created, because the database does not record the accounts that have created these objects. - -The **DROP USER** statement can be used to delete one or more database accounts and their original permissions. - -To use the **DROP USER** statement, you must have the **DELETE** permission on the database or the global **CREATE USER** permission. - -In the **DROP USER** statement, if the hostname of an account is not specified, the hostname is **%** by default. - -##### Example -\#Delete the local user **userexample**. - -``` -> DROP USER 'userexample'@'localhost'; -``` - -#### Granting Permissions to a User - -Run the **GRANT** statement to grant permissions to a new user. - -``` -GRANT privileges ON databasename.tablename TO 'username'@'hostname'; -``` - -In the preceding information: - -- **ON** clause: specifies the object and level on which the permission is granted. -- _privileges_: indicates the operation permissions of a user, such as **SELECT**, INSERT, and **UPDATE**. To grant all permissions to a user, use **ALL**. -- _databasename_: database name. -- _tablename_: table name. -- **TO** clause: sets the user password and specifies the user to whom the permission is granted. -- _username_: name of a user. -- _hostname_: host name. - -To grant the user the permission to operate all databases and tables, use asterisks \(\*\), for example, **\*.\***. - -If you specify a password for an existing user in the **TO** clause, the new password will overwrite the original password. - -If the permission is granted to a non-existent user, a **CREATE USER** statement is automatically executed to create the user, but the password must be specified for the user. 
- -##### Example -\#Grant the **SELECT** and **INSERT** permissions to local user **userexample**. - -``` -> GRANT SELECT,INSERT ON *.* TO 'userexample'@'localhost'; -``` - -#### Deleting User Permissions - -Run the **REVOKE** statement to delete the permissions of a user, but the user will not be deleted. - -``` -REVOKE privilege ON databasename.tablename FROM 'username'@'hostname'; -``` - -The parameters in the **REVOKE** statement are the same as those in the **GRANT** statement. - -To use the **REVOKE** statement, you must have the global **CREATE USER** or **UPDATE** permission for the database. - -##### Example -\#Delete the **INSERT** permission of local user **userexample**. - -``` -> REVOKE INSERT ON *.* FROM 'userexample'@'localhost'; -``` - -### Managing Databases - - -#### Creating a Database - -Run the **CREATE DATABASE** statement to create a database. - -``` -CREATE DATABASE databasename; -``` - -In the preceding command, _databasename_ can be replaced with the database name, which is case insensitive. - -##### Example -\#Create a database named **databaseexample**. - -``` -> CREATE DATABASE databaseexample; -``` - -#### Viewing a Database - -Run the **SHOW DATABASES** statement to view a database. - -``` -SHOW DATABASES; -``` - -##### Example -\#View all databases. - -``` -> SHOW DATABASES; -``` - -#### Selecting a Database - -Generally, you need to select a target database before creating or querying a table. Use the **USE** statement to select a database. - -``` -USE databasename; -``` - -In the preceding command, _databasename_ indicates the database name. - -##### Example -\#Select the **databaseexample** database. - -``` -> USE databaseexample; -``` - -#### Deleting a Database - -Run the **DROP DATABASE** statement to delete a database. - ->![](./public_sys-resources/icon-caution.gif) **CAUTION:** ->Exercise caution when deleting a database. Once a database is deleted, all tables and data in the database will be deleted. 
- -``` -DROP DATABASE databasename; -``` - -In the preceding command, _databasename_ indicates the database name. - -The **DROP DATABASE** command is used to delete an existing database. After this command is executed, all tables in the database are deleted, but the user permissions of the database are not automatically deleted. - -To use **DROP DATABASE**, you need the **DROP** permission on the database. - -**DROP SCHEMA** is a synonym of **DROP DATABASE**. - -##### Example -\#Delete the **databaseexample** database. - -``` -> DROP DATABASE databaseexample; -``` - -#### Backing Up a Database - -Run the **mysqldump** command as the **root** user to back up the database. - -Back up one or more tables: - -``` -mysqldump [options] databasename [tablename ...] > outfile -``` - -Back up one or more databases: - -``` -mysqldump [options] -databases databasename ... > outfile -``` - -Back up all databases: - -``` -mysqldump [options] -all-databases > outputfile -``` - -In the preceding information: - -- _databasename_: database name. -- _tablename_: name of a data table. -- _outfile_: database backup file. -- _options_: parameter option of the **mysqldump** command. Multiple parameters can be separated by spaces. The common parameters of the **mysqldump** command are as follows: - - **-u, \-\-user**= _username_: specifies the username. - - **-p, \-\-password**\[= _password_\]: specifies the password. - - **-P, \-\-port**= _portnumber_: specifies the port number. - - **-h, \-\-host**= _hostname_: specifies the hostname. - - **-r, \-\-result-file**= _filename_: saves the export result to a specified file, which is equivalent to **\>**. - - **-t**: backs up data only. - - **-d**: backs up the table structure only. - - -##### Example -\#Back up all the databases of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **alldb.sql** file. 
- -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 --all-databases > alldb.sql -``` - -\#Back up the db1 database of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 --databases db1 > db1.sql -``` - -\#Back up the tb1 table of the db1 database of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1tb1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 db1 tb1 > db1tb1.sql -``` - -\#Back up only the table structure of the db1 database of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 -d db1 > db1.sql -``` - -\#Back up only the table structure of the db1 database of user **root** on port **3306** of the host whose IP address is **192.168.202.144** to the **db1.sql** file. - -``` -# mysqldump -h 192.168.202.144 -P 3306 -uroot -p123456 -t db1 > db1.sql -``` - -#### Restoring a Database - -Run the **mysql** command as the **root** user to restore the database. - -Restore one or more tables: - -``` -mysql -h hostname -P portnumber -u username -ppassword databasename < infile -``` - -In the preceding information: - -- _hostname_: host name. -- _portnumber_: port number. -- _username_: name of a user. -- _password_: password. -- _databasename_: database name. -- _infile_: **outfile** parameter in the **mysqldump** command. - -##### Example -\#Restore a database. 
- -``` -# mysql -h 192.168.202.144 -P 3306 -uroot -p123456 -t db1 < db1.sql -``` diff --git a/docs/en/docs/Administration/trusted-computing.md b/docs/en/docs/Administration/trusted-computing.md deleted file mode 100644 index 914ab62bcd2a109e1544f440ff1bcc73d71678ba..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/trusted-computing.md +++ /dev/null @@ -1,638 +0,0 @@ -# Trusted Computing - - -- [Trusted Computing](#可信计算) - - [Trusted Computing Basics](#可信计算基础) - - [Trusted Computing](#可信计算-1) - - [Kernel Integrity Measurement Architecture (IMA)](#内核完整性度量ima) - - [Overview](#概述) - - [Constraints](#约束限制) - - [Application Scenarios](#使用场景) - - [Procedure](#操作指导) - - [FAQ](#faq) - - [Appendix](#附录) - - -## Trusted Computing Basics - -### Trusted Computing - -The definition of being trusted varies with international organizations. - -1. Trusted Computing Group (TCG): - - An entity that is trusted always achieves the desired goal in an expected way. - -2. International Organization for Standardization (ISO) and International Electrotechnical Commission (IEC) (1999): - - The components, operations, or processes involved in computing are predictable under any conditions and are resistant to viruses and a certain degree of physical interference. - -3. IEEE Computer Society Technical Committee on Dependable Computing: - - Being trusted means that the services provided by the computer system can be proved to be reliable, and mainly refers to the reliability and availability of the system. - -In short, being trusted means that the system operates according to a pre-determined design and policy. - -A trusted computing system consists of a root of trust, a trusted hardware platform, operating system (OS), and application. The basic idea of the system is to create a trusted computing base (TCB) first, and then establish a trust chain that covers the hardware platform, OS, and application. 
In the trust chain, authentication is performed from the root to the next level, extending trust level by level and building a secure and trusted computing environment. - -![](./figures/trusted_chain.png) - -Unlike the traditional security mechanism that eliminates viruses without solving the root of the problem, trusted computing adopts the whitelist mechanism to allow only authorized kernels, kernel modules, and applications to run on the system. The system will reject the execution of a program that is unknown or has been changed. - -## Kernel Integrity Measurement Architecture (IMA) - -### Overview - -#### IMA - -The integrity measurement architecture (IMA) is a subsystem in the kernel. The IMA can measure files accessed through **execve()**, **mmap()**, and **open()** systems based on user-defined policies. The measurement result can be used for **local or remote attestation**, or can be compared with an existing reference value to **control the access to files**. - -According to the Wiki definition, the function of the kernel integrity subsystem include three parts: - -- Measure: Detects accidental or malicious modifications to files, either remotely or locally. -- Appraise: Measures a file and compares it with a reference value stored in the extended attribute to control the integrity of the local file. -- Audit: Writes the measurement result into system logs for auditing. - -Figuratively, IMA measurement is an observer that only records modification without interfering in it, and IMA appraisal is more like a strict security guard that rejects any unauthorized access to programs. - -#### EVM - -The extended verification module (EVM) is used to calculate a hash value based on the security extended attributes of a file in the system, including **security.ima** and **security.selinux**. Then this value is signed by the key stored in the TPM or other trusted environments. The signature value is stored in **security.evm** and cannot be tampered with. 
If the value is tampered with, the signature verification fails when the file is accessed again. - -In summary, the EVM is used to provide offline protection for security extended attributes by calculating the digest of the attributes and signing and storing them in **security.evm**. - -#### IMA Digest Lists - -IMA Digest Lists are an enhancement of the original kernel integrity protection mechanism provided by openEuler. It replaces the original IMA mechanism to protect file integrity. - -Digest lists are binary data files in a special format. Each digest list corresponds to an RPM package and records the hash values of protected files (executable files and dynamic library files) in the RPM package. - -After the startup parameters are correctly configured, the kernel maintains a hash table (invisible to the user space) and provides interfaces (**digest\_list\_data** and **digest\_list\_data\_del**) that update the hash table using **securityfs**. The digest lists are signed by the private key when they are built. When uploaded to the kernel through the interface, the digest lists need to be verified by the public key in the kernel. - -![](./figures/ima_digest_list_update.png) - -When IMA appraisal is enabled, each time an executable file or dynamic library file is accessed, the hook in the kernel is invoked to calculate the hash values of the file content and extended attributes and search in the kernel hash table. If the calculated hash values match the one in the table, the file is allowed to be executed. Otherwise, the access is denied. - -![1599719649188](./figures/ima_verification.png) - -The IMA Digest Lists extension provided by the openEuler kernel provides higher security, performance, and usability than the native IMA mechanism of the kernel community, facilitating the implementation of the integrity protection mechanism in the production environment. 
- -- **A complete trust chain for high security** - - The native IMA mechanism requires that the file extended attribute be generated and marked in advance on the live network. When the file is accessed, the file extended attribute is used as a reference value, resulting in an incomplete trust chain. - - The IMA Digest Lists extension saves the reference digest value of the file in the kernel space. During the construction, the reference digest value of the file is carried in the released RPM package in the form of a digest list. When the RPM package is installed, the digest list is imported and the signature is verified, ensuring that the reference value comes from the software publisher and implementing a complete trust chain. - -- **Superior performance** - - The trusted platform module (TPM) chip is a low-speed chip, making the PCR extension operation a performance bottleneck in the IMA measurement scenario. To shatter this bottleneck, the Digest Lists extension reduces unnecessary PCR extension operations while ensuring security, providing 65% higher performance than the native IMA mechanism. - - In the IMA appraisal scenario, the Digest Lists extension performs signature verification in the startup phase to prevent signature verification from being performed each time the file is accessed. This helps deliver a 20% higher file access performance in the operation phase than that in the native IMA appraisal scenario. - -- **Fast deployment and smooth upgrade** - - When the native IMA mechanism is deployed for the first time or the software package is updated, you need to switch to the fix mode, manually mark the extended attributes of the file, and then restart the system to enter the enforcing mode. In this way, the installed program can be accessed normally. - - The Digest Lists extension can be used immediately after the installation is completed. 
In addition, the RPM package can be directly installed or upgraded in the enforcing mode without restarting the system or manually marking the extended attributes of the file. This minimizes user perception during the operation, allowing for quick deployment and smooth upgrade on the live network. - -Note: The IMA Digest Lists extension advances the signature verification of the native IMA to the startup phase. This causes the assumption that the memory in the kernel space cannot be tampered with. As a result, the IMA depends on other security mechanisms (secure startup of kernel module and dynamic memory measurement) to protect the integrity of the kernel memory. - -However, either the native IMA mechanism of the community or the IMA Digest Lists extension is only a link in the trust chain of trusted computing, and cannot ensure the system security alone. Security construction is always a systematic project that builds in-depth defense. - -### Constraints - -1. The current IMA appraisal mode can only protect immutable files in the system, including executable files and dynamic library files. -2. The IMA provides integrity measurement at the application layer. The security of the IMA depends on the reliability of the previous links. -3. Currently, the IMA does not support the import of the third-party application digest lists. -4. The startup log may contain `Unable to open file: /etc/keys/x509_ima.der`. This error is reported from the open source community and does not affect the use of the IMA digest lists feature. -5. In the ARM version, audit errors may occur when the log mode is enabled for the IMA. This occurs because the modprobe loads the kernel module before the digest lists are imported, but does not affect the normal functions. - -### Application Scenario - -#### IMA Measurement - -The purpose of IMA measurement is to detect unexpected or malicious modifications to system files. The measurement result can be used for local or remote attestation. 
- -If a TPM chip exists in the system, the measurement result is extended to a specified PCR register of the TPM chip. Due to the unidirectional PCR extension and the hardware security of the TPM chip, a user cannot modify the extended measurement result, thereby ensuring authenticity of the measurement result. - -The file scope and triggering conditions of IMA measurement can be configured by the user using the IMA policy. - -By default, IMA is disabled. However, the system searches for the **ima-policy** policy file in the `/etc/ima/` path. If the file is found, the system measures the files in the system based on the policy during startup. If you do not want to manually compile the policy file, you can configure the `ima_policy=tcb` in the startup parameters using the default policy. For details about more policy parameters, see the section *IMA Startup Parameters* in *Appendix*. - -You can check the currently loaded IMA policy in the `/sys/kernel/security/ima/policy` file. The IMA measurement log is located in the `/sys/kernel/security/ima/ascii_runtime_measurements` file, as shown in the following figure: - -```shell -$ head /sys/kernel/security/ima/ascii_runtime_measurements -10 ddee6004dc3bd4ee300406cd93181c5a2187b59b ima-ng sha1:9797edf8d0eed36b1cf92547816051c8af4e45ee boot_aggregate -10 180ecafba6fadbece09b057bcd0d55d39f1a8a52 ima-ng sha1:db82919bf7d1849ae9aba01e28e9be012823cf3a /init -10 ac792e08a7cf8de7656003125c7276968d84ea65 ima-ng sha1:f778e2082b08d21bbc59898f4775a75e8f2af4db /bin/bash -10 0a0d9258c151356204aea2498bbca4be34d6bb05 ima-ng sha1:b0ab2e7ebd22c4d17d975de0d881f52dc14359a7 /lib64/ld-2.27.so -10 0d6b1d90350778d58f1302d00e59493e11bc0011 ima-ng sha1:ce8204c948b9fe3ae67b94625ad620420c1dc838 /etc/ld.so.cache -10 d69ac2c1d60d28b2da07c7f0cbd49e31e9cca277 ima-ng sha1:8526466068709356630490ff5196c95a186092b8 /lib64/libreadline.so.7.0 -10 ef3212c12d1fbb94de9534b0bbd9f0c8ea50a77b ima-ng sha1:f80ba92b8a6e390a80a7a3deef8eae921fc8ca4e /lib64/libc-2.27.so 
-10 f805861177a99c61eabebe21003b3c831ccf288b ima-ng sha1:261a3cd5863de3f2421662ba5b455df09d941168 /lib64/libncurses.so.6.1 -10 52f680881893b28e6f0ce2b132d723a885333500 ima-ng sha1:b953a3fa385e64dfe9927de94c33318d3de56260 /lib64/libnss_files-2.27.so -10 4da8ce3c51a7814d4e38be55a2a990a5ceec8b27 ima-ng sha1:99a9c095c7928ecca8c3a4bc44b06246fc5f49de /etc/passwd -``` - -From left to right, the content of each record indicates: - -1. PCR: PCR register for extending measurement results (The default value is 10. This register is valid only when the TPM chip is installed in the system.) -2. Template hash value: hash value that is finally used for extension, combining the file content hash and the length and value of the file path -3. Template: template of the extended measurement value, for example, **ima-ng** -4. File content hash value: hash value of the measured file content -5. File path: path of the measured file - -#### IMA Appraisal - -The purpose of IMA appraisal is to control access to local files by comparing the reference value with the standard reference value. - -IMA uses the security extension attributes **security.ima** and **security.evm** to store the reference values of file integrity measurement. - -- **security.ima**: stores the hash value of the file content -- **security.evm**: stores the hash value signature of a file extended attribute - -When a protected file is accessed, the hook in the kernel is triggered to verify the integrity of the extended attributes and content of the file. - -1. Use the public key in the kernel keyring to verify the signature value in the extended attribute of the **security.evm** file, and compare this signature value with the hash value of the extended attribute of the current file. If they match, the extended attribute of the file is complete (including **security.ima**). -2. 
When the extended attribute of the file is complete, the system compares the extended attribute of the file **security.ima** with the digest value of the current file content. If they match, the system allows for the access to the file. - -Likewise, the file scope and trigger conditions for IMA appraisal can be configured by users using IMA policies. - -#### IMA Digest Lists - -Currently, the IMA Digest Lists extension supports the following three combinations of startup parameters: - -* IMA measurement mode: - - ```shell - ima_policy=exec_tcb ima_digest_list_pcr=11 - ``` - -* IMA appraisal log mode + IMA measurement mode: - - ```shell - ima_template=ima-sig ima_policy="exec_tcb|appraise_exec_tcb|appraise_exec_immutable" initramtmpfs ima_hash=sha256 ima_appraise=log evm=allow_metadata_writes evm=x509 ima_digest_list_pcr=11 ima_appraise_digest_list=digest - ``` - -* IMA appraisal enforcing mode + IMA measurement mode: - - ```shell - ima_template=ima-sig ima_policy="exec_tcb|appraise_exec_tcb|appraise_exec_immutable" initramtmpfs ima_hash=sha256 ima_appraise=enforce-evm evm=allow_metadata_writes evm=x509 ima_digest_list_pcr=11 ima_appraise_digest_list=digest - ``` - -### Procedure - -#### Initial Deployment in the Native IMA Scenario - -When the system is started for the first time, you need to configure the following startup parameters: - -```shell -ima_appraise=fix ima_policy=appraise_tcb -``` - -In the `fix` mode, the system can be started when no reference value is available. `appraise_tcb` corresponds to an IMA policy. For details, see *IMA Startup Parameters* in the *Appendix*. - -Next, you need to access all the files that need to be verified to add IMA extended attributes to them: - -```shell -$ time find / -fstype ext4 -type f -uid 0 -exec dd if='{}' of=/dev/null count=0 status=none \; -``` - -This process takes some time. After the command is executed, you can see the marked reference value in the extended attributes of the protected file. 
- -```shell -$ getfattr -m - -d /sbin/init -# file: sbin/init -security.ima=0sAXr7Qmun5mkGDS286oZxCpdGEuKT -security.selinux="system_u:object_r:init_exec_t" -``` - -Configure the following startup parameters and restart the system: - -```shell -ima_appraise=enforce ima_policy=appraise_tcb -``` - -#### Initial Deployment in the Digest Lists Scenario - -1. Set kernel parameters to enter the log mode. - - Add the following parameters to edit the `/boot/efi/EFI/euleros/grub.cfg` file: - - ```shell - ima_template=ima-sig ima_policy="exec_tcb|appraise_exec_tcb|appraise_exec_immutable" initramtmpfs ima_hash=sha256 ima_appraise=log evm=allow_metadata_writes evm=x509 ima_digest_list_pcr=11 ima_appraise_digest_list=digest - ``` - - Run the `reboot` command to restart the system and enter the log mode. In this mode, integrity check has been enabled, but the system can be started even if the check fails. - -2. Install the dependency package. - - Run the **yum** command to install **digest-list-tools** and **ima-evm-utils**. Ensure that the versions are not earlier than the following: - - ```shell - $ yum install digest-list-tools ima-evm-utils - $ rpm -qa | grep digest-list-tools - digest-list-tools-0.3.93-1.oe1.x86_64 - $ rpm -qa | grep ima-evm-utils - ima-evm-utils-1.2.1-9.oe1.x86_64 - ``` - -3. If the **plymouth** package is installed, you need to add `-a` to the end of the **cp** command in line 147 in the `/usr/libexec/plymouth/plymouth-populate-initrd` script file: - - ```shell - ... - ddebug "Installing $_src" - cp -a --sparse=always -pfL "$PLYMOUTH_SYSROOT$_src" "${initdir}/$target" - } - ``` - -4. Run `dracut` to generate **initrd** again: - - ```shell - $ dracut -f -e xattr - ``` - - Edit the `/boot/efi/EFI/euleros/grub.cfg` file by changing **ima\_appraise=log** to **ima\_appraise=enforce-evm**. 
- - ```shell - ima_template=ima-sig ima_policy="exec_tcb|appraise_exec_tcb|appraise_exec_immutable" initramtmpfs ima_hash=sha256 ima_appraise=enforce-evm evm=allow_metadata_writes evm=x509 ima_digest_list_pcr=11 ima_appraise_digest_list=digest - ``` - - Run the **reboot** command to complete the initial deployment. - -#### Building Digest Lists on OBS - -Open Build Service (OBS) is a compilation system that was first used for building software packages in openSUSE and supports distributed compilation of multiple architectures. - -Before building a digest list, ensure that your project contains the following RPM packages from openEuler: - -* digest-list-tools -* pesign-obs-integration -* selinux-policy -* rpm -* openEuler-rpm-config - -Add **Project Config** in the deliverable project: - -```shell -Preinstall: pesign-obs-integration digest-list-tools selinux-policy-targeted -Macros: -%__brp_digest_list /usr/lib/rpm/openEuler/brp-digest-list %{buildroot} -:Macros -``` - -* The following content is added to **Preinstall**: **digest-list-tools** for generating the digest list; **pesign-obs-integration** for generating the digest list signature; **selinux-policy-targeted**, ensuring that the SELinux label in the environment is correct when the digest list is generated. -* Define the macro **%\_\_brp\_digest\_list** in Macros. The RPM runs this macro to generate a digest list for the compiled binary file in the build phase. This macro can be used as a switch to control whether the digest list is generated in the project. - -After the configuration is completed, OBS automatically performs full build. In normal cases, the following two files are added to the software package: - -* **/etc/ima/digest\_lists/0-metadata\_list-compact-\[package name]-\[version number]** -* **/etc/ima/digest\_lists.tlv/0-metadata\_list-compact\_tlv-\[package name]-\[version number]** - -#### Building Digest Lists on Koji - -Koji is a compilation system of the Fedora community. 
The openEuler community will support Koji in the future. - -### FAQ - -1. Why does the system fail to be started, or commands fail to be executed, or services are abnormal after the system is started in enforcing mode? - - In enforcing mode, IMA controls file access. If the content or extended attributes of a file to be accessed are incomplete, the access will be denied. If key commands that affect system startup cannot be executed, the system cannot be started. - - Check whether the following problems exist: - - * **Check whether the digest list is added to initrd.** - - Check whether the **dracut** command is executed to add the digest list to the kernel during the initial deployment. If the digest list is not added to **initrd**, the digest list cannot be imported during startup. As a result, the startup fails. - - * **Check whether the official RPM package is used.** - - If a non-official openEuler RPM package is used, the RPM package may not carry the digest list, or the private key for signing the digest list does not match the public key for signature verification in the kernel. As a result, the digest list is not imported to the kernel. - - If the cause is not clear, enter the log mode and find the cause from the error log: - - ```shell - $ dmesg | grep appraise - ``` - -2. Why access control is not performed on system files in enforcing mode? - - When the system does not perform access control on the file as expected, check whether the IMA policy in the startup parameters is correctly configured: - - ```shell - $ cat /proc/cmdline - ...ima_policy=exec_tcb|appraise_exec_tcb|appraise_exec_immutable... - ``` - - Run the following command to check whether the IMA policy in the current kernel has taken effect: - - ```shell - $ cat /sys/kernel/security/ima/policy - ``` - - If the policy file is empty, it indicates that the policy fails to be set. In this case, the system does not perform access control. - -3. 
After the initial deployment is completed, do I need to manually run the **dracut** command to generate **initrd** after installing, upgrading, or uninstalling the software package? - - No. The **digest\_list.so** plug-in provided by the RPM package can automatically update the digest list at the RPM package granularity, allowing users to be unaware of the digest list. - -### Appendix - -#### Description of the IMA securityfs Interface - -The native IMA provides the following **securityfs** interfaces: - -> Note: The following interface paths are in the `/sys/kernel/security/` directory. - -| Path | Permission | Description | -| ------------------------------ | ---------- | ------------------------------------------------------------ | -| ima/policy | 600 | IMA policy interface | -| ima/ascii_runtime_measurement | 440 | IMA measurement result in ASCII code format | -| ima/binary_runtime_measurement | 440 | IMA measurement result in binary format | -| ima/runtime_measurement_count | 440 | Measurement result statistics | -| ima/violations | 440 | Number of IMA measurement result conflicts | -| evm | 660 | EVM mode, that is, the mode for verifying the integrity of extended attributes of files | - -The values of `/sys/kernel/security/evm` are as follows: - -* 0: EVM uninitialized. -* 1: Uses HMAC (symmetric encryption) to verify the integrity of extended attributes. -* 2: Uses the public key signature (asymmetric encryption) to verify the integrity of extended attributes. -* 6: Disables the integrity check of extended attributes (This mode is used for openEuler). 
- -The additional **securityfs** interfaces provided by the IMA Digest Lists extension are as follows: - -| Path | Permission | Description | -| ------------------------ | ---------- | ---------------------------------------------------------- | -| ima/digests_count | 440 | Total number of digests (IMA+EVM) in the system hash table | -| ima/digest_list_data | 200 | New interfaces in the digest list | -| ima/digest_list_data_del | 200 | Interfaces deleted from the digest list | - -#### IMA Policy Syntax - -Each IMA policy statement must start with an **action** represented by the keyword action and be followed by a **filtering condition**: - -- **action**: indicates the action of a policy. Only one **action** can be selected for a policy. - - > Note: You can **ignore the word action** and directly write **dont\_measure** instead of **action=dont\_measure**. - -- **func**: indicates the type of the file to be measured or authenticated. It is often used together with **mask**. Only one **func** can be selected for a policy. - - - **FILE\_CHECK** can be used only with **MAY\_EXEC**, **MAY\_WRITE**, and **MAY\_READ**. - - **MODULE\_CHECK**, **MMAP\_CHECK**, and **BPRM\_CHECK** can be used only with **MAY\_EXEC**. - - A combination without the preceding matching relationships does not take effect. - -- **mask**: indicates the operation upon which files will be measured or appraised. Only one **mask** can be selected for a policy. - -- **fsmagic**: indicates the hexadecimal magic number of the file system type, which is defined in the `/usr/include/linux/magic.h` file. - - > Note: By default, all file systems are measured unless you use the **dont\_measure/dont\_appraise** to mark a file system not to be measured. - -- **fsuid**: indicates the UUID of a system device. The value is a hexadecimal string of 16 characters. - -- **objtype**: indicates the file type. Only one file type can be selected for a policy. - - > Note: **objtype** has a finer granularity than **func**. 
For example, **obj\_type=nova\_log\_t** indicates the nova log file. - -- **uid**: indicates the user (represented by the user ID) who performs operations on the file. Only one **uid** can be selected for a policy. - -- **fowner**: indicates the owner (represented by the user ID) of the file. Only one **fowner** can be selected for a policy. - -The values and description of the keywords are as follows: - -| Keyword | Value | Description | -| ------------- | ------------------ | ------------------------------------------------------------ | -| action | measure | Enables IMA measurement | -| | dont_measure | Disables IMA measurement | -| | appraise | Enables IMA appraisal | -| | dont_appraise | Disables IMA appraisal | -| | audit | Enables audit | -| func | FILE_CHECK | File to be opened | -| | MODULE_CHECK | Kernel module file to be loaded | -| | MMAP_CHECK | Dynamic library file to be mapped to the memory space of the process | -| | BPRM_CHECK | File to be executed (excluding script files opened by programs such as `/bin/hash`) | -| | POLICY_CHECK | File to be loaded as a supplement to the IMA policy | -| | FIRMWARE_CHECK | Firmware to be loaded into memory | -| | DIGEST_LIST_CHECK | Digest list file to be loaded into the kernel | -| | KEXEC_KERNEL_CHECK | kexec kernel to be switched to | -| mask | MAY_EXEC | Executes a file | -| | MAY_WRITE | Writes data to a file. This operation is not recommended because it is restricted by open source mechanisms such as echo and vim (the essence of modification is to create a temporary file and then rename it). The IMA measurement of **MAY\_WRITE** is not triggered each time the file is modified. | -| | MAY_READ | Reads a file | -| | MAY_APPEND | Extends file attributes | -| fsmagic | fsmagic=xxx | Hexadecimal magic number of the file system type | -| fsuuid | fsuuid=xxx | UUID of a system device. The value is a hexadecimal string of 16 characters. 
| -| fowner | fowner=xxx | User ID of the file owner | -| uid | uid=xxx | ID of the user who operates the file | -| obj_type | obj_type=xxx_t | File type (based on the SELinux tag) | -| pcr | pcr= | Selects the PCR used to extend the measurement values in the TPM. The default value is 10. | -| appraise_type | imasig | Signature-based IMA appraisal | -| | meta_immutable | Evaluates the extended attributes of the file based on signatures (supporting the digest list). | - -> Note: **PATH\_CHECK** is equivalent to **FILE\_CHECK**, and **FILE\_MMAP** is equivalent to **MMAP\_CHECK**. They are not mentioned in this table. - -#### IMA Native Startup Parameters - -The following table lists the kernel startup parameters of the native IMA. - -| Parameter | Value | Description | -| ---------------- | ------------ | ------------------------------------------------------------ | -| ima_appraise | off | Disables the IMA appraisal mode. The integrity check is not performed when the file is accessed and no new reference value is generated for the file. | -| | enforce | Enables the IMA appraisal enforcing mode to perform the integrity check when the file is accessed. That is, the file digest value is calculated and compared with the reference value. If the comparison fails, the file access is rejected. In this case, the IMA generates a new reference value for the new file. | -| | fix | Enables the IMA repair mode. In this mode, the reference value of a protected file can be updated. | -| | log | Enables the IMA appraisal log mode to perform the integrity check when the file is accessed. However, commands can be executed even if the check fails, and only logs are recorded. | -| ima_policy | tcb | Measures all file execution, dynamic library mapping, kernel module import, and device driver loading. The file read behavior of the root user is also measured. | -| | appraise_tcb | Evaluates all files whose owner is the root user. 
| -| | secure_boot | Evaluates the kernel module import, hardware driver loading, kexec kernel switchover, and IMA policies. The prerequisite is that these files have IMA signatures. | -| ima_tcb | None | Equivalent to **ima\_policy=tcb**. | -| ima_appraise_tcb | None | Equivalent to **ima\_policy=appraise\_tcb**. | -| ima_hash | sha1/md5/... | IMA digest algorithm. The default value is sha1. | -| ima_template | ima | IMA measurement extension template | -| | ima-ng | IMA measurement extension template | -| | ima-sig | IMA measurement extension template | -| integrity_audit | 0 | Basic integrity audit information (default) | -| | 1 | Additional integrity audit information | - -> Note: The **ima\_policy** parameter can specify multiple values at the same time, for example, **ima\_policy=tcb\|appraise\_tcb**. After the system is started, the IMA policy of the system is the sum of the policies for the two parameters. - -The IMA policy for the `ima_policy=tcb` startup parameter is as follows: - -``` -# PROC_SUPER_MAGIC = 0x9fa0 -dont_measure fsmagic=0x9fa0 -# SYSFS_MAGIC = 0x62656572 -dont_measure fsmagic=0x62656572 -# DEBUGFS_MAGIC = 0x64626720 -dont_measure fsmagic=0x64626720 -# TMPFS_MAGIC = 0x01021994 -dont_measure fsmagic=0x1021994 -# DEVPTS_SUPER_MAGIC=0x1cd1 -dont_measure fsmagic=0x1cd1 -# BINFMTFS_MAGIC=0x42494e4d -dont_measure fsmagic=0x42494e4d -# SECURITYFS_MAGIC=0x73636673 -dont_measure fsmagic=0x73636673 -# SELINUX_MAGIC=0xf97cff8c -dont_measure fsmagic=0xf97cff8c -# SMACK_MAGIC=0x43415d53 -dont_measure fsmagic=0x43415d53 -# CGROUP_SUPER_MAGIC=0x27e0eb -dont_measure fsmagic=0x27e0eb -# CGROUP2_SUPER_MAGIC=0x63677270 -dont_measure fsmagic=0x63677270 -# NSFS_MAGIC=0x6e736673 -dont_measure fsmagic=0x6e736673 -measure func=MMAP_CHECK mask=MAY_EXEC -measure func=BPRM_CHECK mask=MAY_EXEC -measure func=FILE_CHECK mask=MAY_READ uid=0 -measure func=MODULE_CHECK -measure func=FIRMWARE_CHECK -``` - -The IMA policy for the `ima_policy=appraise_tcb` startup parameter 
is as follows: - -``` -# PROC_SUPER_MAGIC = 0x9fa0 -dont_appraise fsmagic=0x9fa0 -# SYSFS_MAGIC = 0x62656572 -dont_appraise fsmagic=0x62656572 -# DEBUGFS_MAGIC = 0x64626720 -dont_appraise fsmagic=0x64626720 -# TMPFS_MAGIC = 0x01021994 -dont_appraise fsmagic=0x1021994 -# RAMFS_MAGIC -dont_appraise fsmagic=0x858458f6 -# DEVPTS_SUPER_MAGIC=0x1cd1 -dont_appraise fsmagic=0x1cd1 -# BINFMTFS_MAGIC=0x42494e4d -dont_appraise fsmagic=0x42494e4d -# SECURITYFS_MAGIC=0x73636673 -dont_appraise fsmagic=0x73636673 -# SELINUX_MAGIC=0xf97cff8c -dont_appraise fsmagic=0xf97cff8c -# SMACK_MAGIC=0x43415d53 -dont_appraise fsmagic=0x43415d53 -# NSFS_MAGIC=0x6e736673 -dont_appraise fsmagic=0x6e736673 -# CGROUP_SUPER_MAGIC=0x27e0eb -dont_appraise fsmagic=0x27e0eb -# CGROUP2_SUPER_MAGIC=0x63677270 -dont_appraise fsmagic=0x63677270 -appraise fowner=0 -``` - -The IMA policy for the `ima_policy=secure_boot` startup parameter is as follows: - -``` -appraise func=MODULE_CHECK appraise_type=imasig -appraise func=FIRMWARE_CHECK appraise_type=imasig -appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig -appraise func=POLICY_CHECK appraise_type=imasig -``` - -#### IMA Digest List Startup Parameters - -The kernel startup parameters added to the IMA digest list feature are as follows: - -| Parameter | Value | Description | -| ------------------------ | ----------------------- | ------------------------------------------------------------ | -| integrity | 0 | Disables the IMA feature (by default) | -| | 1 | Enables the IMA feature | -| ima_appraise | off | Disables the IMA appraisal mode | -| | enforce-evm | Enables the IMA appraisal forced mode to perform the integrity check when the file is accessed and control the access. | -| ima_appraise_digest_list | digest | When the EVM is disabled, the digest list is used for IMA appraisal. The digest list protects both the content and extended attributes of the file. 
| -| | digest-nometadata | If the EVM digest value does not exist, the integrity check is performed only based on the IMA digest value (the file extended attribute is not protected). | -| evm | fix | Allows for any modification to the extended attribute (even if the modification causes the failure to verify the integrity of the extended attribute). | -| | ignore | Allows modifying the extended attribute only when it does not exist or is incorrect. | -| ima_policy | exec_tcb | IMA measurement policy. For details, see the following policy description. | -| | appraise_exec_tcb | IMA appraisal policy. For details, see the following policy description. | -| | appraise_exec_immutable | IMA appraisal policy. For details, see the following policy description. | -| ima_digest_list_pcr | 11 | Uses PCR 11 instead of PCR 10, and uses only the digest list for measurement. | -| | +11 | The PCR 10 measurement is reserved. When the TPM chip is available, the measurement result is written to the TPM chip. | -| initramtmpfs | None | Adds the support for **tmpfs**. 
| - - - -The IMA policy for the `ima_policy=exec_tcb` startup parameter is as follows: - -``` -dont_measure fsmagic=0x9fa0 -dont_measure fsmagic=0x62656572 -dont_measure fsmagic=0x64626720 -dont_measure fsmagic=0x1cd1 -dont_measure fsmagic=0x42494e4d -dont_measure fsmagic=0x73636673 -dont_measure fsmagic=0xf97cff8c -dont_measure fsmagic=0x43415d53 -dont_measure fsmagic=0x27e0eb -dont_measure fsmagic=0x63677270 -dont_measure fsmagic=0x6e736673 -measure func=MMAP_CHECK mask=MAY_EXEC -measure func=BPRM_CHECK mask=MAY_EXEC -measure func=MODULE_CHECK -measure func=FIRMWARE_CHECK -measure func=POLICY_CHECK -measure func=DIGEST_LIST_CHECK -measure parser -``` - -The IMA policy for the `ima_policy=appraise_exec_tcb` startup parameter is as follows: - -``` -appraise func=MODULE_CHECK appraise_type=imasig -appraise func=FIRMWARE_CHECK appraise_type=imasig -appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig -appraise func=POLICY_CHECK appraise_type=imasig -appraise func=DIGEST_LIST_CHECK appraise_type=imasig -dont_appraise fsmagic=0x9fa0 -dont_appraise fsmagic=0x62656572 -dont_appraise fsmagic=0x64626720 -dont_appraise fsmagic=0x858458f6 -dont_appraise fsmagic=0x1cd1 -dont_appraise fsmagic=0x42494e4d -dont_appraise fsmagic=0x73636673 -dont_appraise fsmagic=0xf97cff8c -dont_appraise fsmagic=0x43415d53 -dont_appraise fsmagic=0x6e736673 -dont_appraise fsmagic=0x27e0eb -dont_appraise fsmagic=0x63677270 -``` - -The IMA policy for the `ima_policy=appraise_exec_immutable` startup parameter is as follows: - -``` -appraise func=BPRM_CHECK appraise_type=imasig appraise_type=meta_immutable -appraise func=MMAP_CHECK -appraise parser appraise_type=imasig -``` - -#### IMA Kernel Compilation Options - -The native IMA provides the following compilation options: - -| Compilation Option | Description | -| -------------------------------- | ------------------------------------------------------- | -| CONFIG_INTEGRITY | IMA/EVM compilation switch | -| CONFIG_INTEGRITY_SIGNATURE | Enables IMA 
signature verification | -| CONFIG_INTEGRITY_ASYMMETRIC_KEYS | Enables IMA asymmetric signature verification | -| CONFIG_INTEGRITY_TRUSTED_KEYRING | Enables IMA/EVM key ring | -| CONFIG_INTEGRITY_AUDIT | Compiles the IMA audit module | -| CONFIG_IMA | IMA compilation switch | -| CONFIG_IMA_WRITE_POLICY | Allows updating the IMA policy in the running phase | -| CONFIG_IMA_MEASURE_PCR_IDX | Allows specifying the PCR number of the IMA measurement | -| CONFIG_IMA_LSM_RULES | Allows configuring LSM rules | -| CONFIG_IMA_APPRAISE | IMA appraisal compilation switch | -| IMA_APPRAISE_BOOTPARAM | Enables IMA appraisal startup parameters | -| CONFIG_EVM | EVM compilation switch | - -The additional compilation options provided by the IMA Digest Lists extension are as follows: - -| Compilation Option | Description | -| ------------------ | ----------------------------------- | -| CONFIG_DIGEST_LIST | Enables the IMA Digest List feature | - -#### IMA Performance Reference Data - -The following figure compares the performance when IMA is disabled, native IMA is enabled, and IMA digest list is enabled. - -![img](./figures/ima_performance.png) \ No newline at end of file diff --git a/docs/en/docs/Administration/user-and-user-group-management.md b/docs/en/docs/Administration/user-and-user-group-management.md deleted file mode 100644 index 9a88a1214db0d532dfb73fd44793c09a312159a1..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/user-and-user-group-management.md +++ /dev/null @@ -1,327 +0,0 @@ -# User and User Group Management - -In Linux, each common user has an account, including the user name, password, and home directory. There are also special users created for specific purposes, and the most important special user is the admin account whose default user name is root. In addition, Linux provides user groups so that each user belongs to at least one group, facilitating permission management. 
- -The control of users and user groups is a core element of openEuler security management. This topic introduces the user and group management commands and explains how to assign privileges to common users in graphical user interface and on command lines. - - -- [User and User Group Management](#user-and-user-group-management) - - [Managing Users](#managing-users) - - [Adding a User](#adding-a-user) - - [Modifying a User Account](#modifying-a-user-account) - - [Deleting a User](#deleting-a-user) - - [Granting Rights to a Common User](#granting-rights-to-a-common-user) - - [Managing User Groups](#managing-user-groups) - - [Adding a User Group](#adding-a-user-group) - - [Modifying a User Group](#modifying-a-user-group) - - [Deleting a User Group](#deleting-a-user-group) - - [Adding a User to a Group or Removing a User from a Group](#adding-a-user-to-a-group-or-removing-a-user-from-a-group) - - [Changing the Current Group of a User to a Specified Group](#changing-the-current-group-of-a-user-to-a-specified-group) - - - -## Managing Users - -### Adding a User - -#### useradd Command -Run the **useradd** command as the user **root** to add user information to the system. In the command, _options_ indicates related parameters and _username_ indicates the user name. - -``` -useradd [options] username -``` - -#### User Information Files -The following files contain user account information: - -- /etc/passwd: user account information -- /etc/shadow file: user account encryption information -- /etc/group file: group information -- /etc/default/useradd: default configurations -- /etc/login.defs: system wide settings -- /etc/skel: default directory that holds initial configuration files - -#### Example -For example, to create a user named userexample, run the following command as the user **root**: - -``` -# useradd userexample -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If no prompt is displayed, the user is successfully created. 
After the user is created, run the **passwd** command to assign a password to the user. A new account without a password will be banned. - -To view information about the new user, run the **id** command: - -``` -# id userexample -uid=502(userexample) gid=502(userexample) groups=502(userexample) -``` - -To change the password of the userexample, run the following command: - -``` -# passwd userexample -``` - - -It is recommended that the new user password meet the complexity requirements. The password complexity requirements are as follows: - -1. A password must contain at least eight characters. -2. A password must contain at least three of the following types: uppercase letters, lowercase letters, digits, and special characters. -3. A password must be different from the account name. -4. A password cannot contain words in the dictionary. - - Querying a dictionary - In the installed openEuler environment, you can run the following command to export the dictionary library file **dictionary.txt**, and then check whether the password is in the dictionary. - ``` - cracklib-unpacker /usr/share/cracklib/pw_dict > dictionary.txt - ``` - - Modifying a dictionary - 1. Modify the exported dictionary library file, and then run the following command to update the dictionary library: - ``` - # create-cracklib-dict dictionary.txt - ``` - 2. Run the following command to add another dictionary file **custom.txt** to the original dictionary library. - ``` - # create-cracklib-dict dictionary.txt custom.txt - ``` - -Then, enter the password and confirm it as prompted: - -``` -# passwd userexample -Changing password for user userexample. -New password: -Retype new password: -passwd: all authentication tokens updated successfully. -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the command output contains **BAD PASSWORD: The password fails the dictionary check - it is too simplistic/sytematic**, the password is too simple and needs to be reset. 
- -### Modifying a User Account - -#### Changing a Password -Common users can change their passwords using the **passwd** command. Only the admin is allowed to use the **passwd username** command to change passwords for other users. - -#### Changing User's Login Shell -Common users can use the **chsh** command to change their login shell. Only the admin is allowed to run the **chsh username** command to change login shell for other users. - -Users can also run the **usermod** command as the user **root** to modify the shell information. In the command, _new_shell_path_ indicates the target shell path, and _username_ indicates the user name to be modified. Change them based on the site requirements. - -``` -usermod -s new_shell_path username -``` - -For example, to change the shell of userexample to csh, run the following command: - -``` -# usermod -s /bin/csh userexample -``` - -#### Changing the Home Directory -- To change the home directory, run the following command as the user **root**. In the command, _new\_home\_directory_ indicates the created target home directory, and _username_ indicates the user name to be changed. Change them based on the site requirements. - - ``` - usermod -d new_home_directory username - ``` - -- To move the content in the current home directory to a new one, run the usermod command with the -m option: - - ``` - usermod -d new_home_directory -m username - ``` - - -#### Changing a UID -To change the user ID, run the following command as the user **root**. In the command, _UID_ indicates the target user ID and _username_ indicates the user name. Change them based on the site requirements. - -``` -usermod -u UID username -``` - -The usermod command can change a user's UID in all files and directories under the user's home directory. However, for files outside the user's home directory, their owners can only be changed using the **chown** command. 
- -#### Changing Account Expiry Date -If the shadow password is used, run the following command as the user **root** to change the validity period of an account. In the command, _MM_, _DD_, and _YY_ indicate the month, day, and year, respectively, and _username_ indicates the user name. Change them based on the site requirements. - -``` -usermod -e MM/DD/YY username -``` - -### Deleting a User - -Run the **userdel** command as the user **root** to delete an existing user. - -For example, run the following command to delete user Test: - -``` -# userdel Test -``` - -If you also need to delete the user's home directory and all contents in the directory, run the **userdel** command with the -r option to delete them recursively. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->You are not advised to directly delete a user who has logged in to the system. To forcibly delete a user, run the **userdel -f** _Test_ command. - -### Granting Rights to a Common User - -The **sudo** command allows common users to execute commands that can be executed only by administrator accounts. - -The **sudo** command allows the user specified in the **/etc/sudoers** file to execute the administrator account commands. For example, an authorized common user can run: - -``` -sudo /usr/sbin/useradd newuserl -``` - -The **sudo** command can specify a common user that has been added to the **/etc/sudoers** file to process tasks as required. - -The information configured in the **/etc/sudoers** file is as follows: - -- Blank lines or comment lines starting with **\#**: Have no specific functions. -- Optional host alias lines: Create the name of a host list. The lines must start with **Host\_Alias**. The host names in the list must be separated by commas \(,\). For example: - - ``` - Host_Alias linux=ted1,ted2 - ``` - - **ted1** and **ted2** are two host names, which can be called **linux**. - - -- Optional user alias lines: Create the name of a user list. 
The lines must start with **User\_Alias**. The user names in the list must be separated by commas \(,\). The user alias lines have the same format as the host alias lines. -- Optional command alias lines: Create the name of a command list. The lines must start with **Cmnd\_Alias**. The commands in the list must be separated by commas \(,\). -- Optional running mode alias lines: Create the name of a user list. The difference is that such alias can enable a user in the list to run the **sudo** command. -- Necessary declaration lines for user access: - - The declaration syntax for user access is as follows: - - ``` - user host = [ run as user ] command list - ``` - - Set the user to a real user name or a defined user alias, and set the host to a real host name or a defined host alias. By default, all the commands executed by sudo are executed as user **root**. If you want to use another account, you can specify it. **command list** is either a command list separated by commas \(,\) or a defined command alias. For example: - - ``` - ted1 ted2=/sbin/shutdown - ``` - - In this example, ted1 can run the shutdown command on ted2. - - ``` - newuser1 ted1=(root) /usr/sbin/useradd,/usr/sbin/userdel - ``` - - This indicates that newuser1 on the ted1 host can run the **useradd** and **userdel** commands as the user **root**. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- You can define multiple aliases in a line and separate them with colons \(:\). - >- You can add an exclamation mark \(!\) before a command or a command alias to make the command or the command alias invalid. - >- There are two keywords: ALL and NOPASSWD. ALL indicates all files, hosts, or commands, and NOPASSWD indicates that no password is required. - >- By modifying user access, you can change the access permission of a common user to be the same as that of the user **root**. Then, you can grant rights to the common user. 
- - -The following is an example of the **sudoers** file: - -``` -#sudoers files -#User alias specification -User_Alias ADMIN=ted1:POWERUSER=globus,ted2 -#user privilege specification -ADMIN ALL=ALL -POWERUSER ALL=ALL,!/bin/su -``` - -In the preceding information: - -- User\_Alias ADMIN=ted1:POWERUSER=globus,ted2 - - Two aliases ADMIN and POWERUSER are defined. - -- ADMIN ALL=ALL - - ADMIN can run all commands as the user **root** on all hosts. - -- POWERUSER ALL=ALL,!/bin/su - - POWERUSER can run all commands except the **su** command as the user **root** on all hosts. - - -## Managing User Groups - -### Adding a User Group - -#### groupadd Command -Run the **groupadd** command as the **root** user to add user group information to the system. In the command, _options_ indicates related parameters and _groupname_ indicates the group name. - -``` -groupadd [options] groupname -``` - -#### User Group Information Files -The following files contain user group information: - -- /etc/gshadow file: user group encryption information -- /etc/group file: group information -- /etc/login.defs: system wide settings - -#### Example -For example, to create a user group named groupexample, run the following command as the **root** user: - -``` -# groupadd groupexample -``` - -### Modifying a User Group - -#### Changing a GID -To change the user group ID, run the following command as the **root** user. In the command, _GID_ indicates the target user group ID and _groupname_ indicates the user group name. Change them based on the site requirements. - -``` -groupmod -g GID groupname -``` - -#### Changing a User Group Name -To change the user group name, run the following command as the **root** user. In the command, _newgroupname_ indicates the user group new name and _oldgroupname_ indicates the user group name. Change them based on the site requirements. 
- -``` -groupmod -n newgroupname oldgroupname -``` - -### Deleting a User Group - -Run the **groupdel** command as the **root** user to delete an existing user group. - -For example, run the following command to delete user group Test: - -``` -# groupdel Test -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The user's primary group cannot be directly deleted. To forcibly delete a user's primary group, run the **groupdel -f** _Test_ command. - -### Adding a User to a Group or Removing a User from a Group - -Run the **gpasswd** command as the **root** user to add a user to a group or remove a user from a group. - -For example, run the following command to add the user userexample to the user group Test: - -``` -# gpasswd -a userexample Test -``` - -For example, run the following command to remove the user userexample from the user group Test: - -``` -# gpasswd -d userexample Test -``` - -### Changing the Current Group of a User to a Specified Group - -If a user belongs to multiple user groups, run the **newgrp** command to switch the user to another user group after logging in to the system. Then, the user has the permission of other user groups. - -For example, run the following command to change the current group of the user userexample to the user group Test: - -``` -$ newgrp Test -``` diff --git a/docs/en/docs/Administration/using-the-dnf-to-manage-software-packages.md b/docs/en/docs/Administration/using-the-dnf-to-manage-software-packages.md deleted file mode 100644 index 21b303b28ab6e98e98c13ae3b8617038ebfa4a28..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/using-the-dnf-to-manage-software-packages.md +++ /dev/null @@ -1,579 +0,0 @@ -# Using the DNF to Manage Software Packages - -DNF is a Linux software package management tool used to manage RPM software packages. 
The DNF can query software package information, obtain software packages from a specified software library, automatically process dependencies to install or uninstall software packages, and update the system to the latest available version. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- DNF is fully compatible with YUM and provides YUM-compatible command lines and APIs for extensions and plug-ins. ->- You must have the administrator rights to use the DNF. All commands in this chapter must be executed by the administrator. - - -- [Using the DNF to Manage Software Packages](#using-the-dnf-to-manage-software-packages) - - [Configuring the DNF](#configuring-the-dnf) - - [The DNF Configuration File](#the-dnf-configuration-file) - - [Creating a Local Software Repository](#creating-a-local-software-repository) - - [Adding, Enabling, and Disabling Software Sources](#adding-enabling-and-disabling-software-sources) - - [Managing Software Package](#managing-software-package) - - [Searching for Software Packages](#searching-for-software-packages) - - [Listing Software Packages](#listing-software-packages) - - [Displaying RPM Package Information](#displaying-rpm-package-information) - - [Installing an RPM Package](#installing-an-rpm-package) - - [Downloading Software Packages](#downloading-software-packages) - - [Deleting a Software Package](#deleting-a-software-package) - - [Managing Software Package Groups](#managing-software-package-groups) - - [Listing Software Package Groups](#listing-software-package-groups) - - [Displaying the Software Package Group Information](#displaying-the-software-package-group-information) - - [Installation Software Package Group](#installation-software-package-group) - - [Deleting a Software Package Group](#deleting-a-software-package-group) - - [Check and Update](#check-and-update) - - [Checking for Update](#checking-for-update) - - [Upgrade](#upgrade) - - [Updating All Packages and Their 
Dependencies](#updating-all-packages-and-their-dependencies) - - - -## Configuring the DNF - - - -### The DNF Configuration File - -The main configuration file of the DNF is /etc/dnf/dnf.conf which consists of two parts: - -- The **main** part in the file stores the global settings of the DNF. - -- The **repository** part in the file stores the settings of the software source. You can add one or more **repository** sections to the file. - -In addition, the /etc/yum.repos.d directory stores one or more repo source files, which define different repositories. - -You can configure a software source by either directly configuring the /etc/dnf/dnf.conf file or configuring the .repo file in the /etc/yum.repos.d directory. - -#### Configuring the main Part -The /etc/dnf/dnf.conf file contains the **main** part. The following is an example of the configuration file: - -``` -[main] -gpgcheck=1 -installonly_limit=3 -clean_requirements_on_remove=True -best=True -``` - -Common options are as follows: - -**Table 1** main parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

cachedir

-

Cache directory for storing RPM packages and database files.

-

keepcache

-

The options are 1 and 0, indicating whether to keep downloaded RPM packages in the cache directory after a successful installation. The default value is 0, indicating that downloaded packages are not kept after installation.

-

debuglevel

-

Sets the level of debugging output generated by the DNF. The value ranges from 0 to 10. A larger value indicates more detailed debugging information. The default value is 2. The value 0 indicates that debugging output is disabled.

-

clean_requirements_on_remove

-

When a package is removed with the DNF, also removes its dependency packages that are no longer required. A dependency package is considered no longer required if it was installed by the DNF to satisfy a dependency rather than by an explicit user request. The default value is True.

-

best

-

During an upgrade, the system always attempts to install the highest available version of a package. If the highest version cannot be installed, the system displays the cause and stops the installation. The default value is True.

-

obsoletes

-

The options are 1 and 0, indicating whether to process package obsoletes during updates, that is, whether an installed package can be replaced by a newer package that obsoletes it. The default value is 1, indicating that obsoletes processing is enabled.

-

gpgcheck

-

The options are 1 and 0, indicating whether to perform GPG verification. The default value is 1, indicating that verification is required.

-

plugins

-

The options are 1 and 0, indicating whether DNF plug-ins are enabled or disabled. The default value is 1, indicating that DNF plug-ins are enabled.

-

installonly_limit

-

Sets the maximum number of versions that can be installed simultaneously for each package listed in the installonlypkgs configuration option. The default value is 3. You are advised not to decrease the value.

-
- -#### Configuring the repository Part - -The repository part allows you to customize openEuler software source repositories. The name of each repository must be unique. Otherwise, conflicts may occur. You can configure a software source by either directly configuring the /etc/dnf/dnf.conf file or configuring the .repo file in the /etc/yum.repos.d directory. - -- Configuring the /etc/dnf/dnf.conf file - - The following is a minimum configuration example of the \[repository\] section: - - ``` - [repository] - name=repository_name - baseurl=repository_url - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >openEuler provides an online image source at [https://repo.openeuler.org/](https://repo.openeuler.org/). For example, if the openEuler 21.03 version is aarch64, the **baseurl** can be set to [https://repo.openeuler.org/openEuler-21.03/OS/aarch64/](https://repo.openeuler.org/openEuler-21.03/OS/aarch64/). - - Common options are as follows: - - **Table 2** repository parameter description - - - - - - - - - - - -

Parameter

-

Description

-

name=repository_name

-

Name string of a software repository.

-

baseurl=repository_url

-

Address of the software repository.

-
  • Network location using the HTTP protocol, for example, http://path/to/repo
  • Network location using the FTP protocol, for example, ftp://path/to/repo
  • Local path: for example, file:///path/to/local/repo
-
- - -- Configuring the .repo file in the /etc/yum.repos.d directory - openEuler provides multiple repo sources for users online. For details about the repo sources, see [System Installation](./../Releasenotes/installing-the-os.md.html). - - For example, run the following command as the **root** user to add the openeuler repo source to the openEuler.repo file. - - ``` - # vi /etc/yum.repos.d/openEuler.repo - ``` - - ``` - [OS] - name=openEuler-$releasever - OS - baseurl=https://repo.openeuler.org/openEuler-21.03/OS/$basearch/ - enabled=1 - gpgcheck=1 - gpgkey=https://repo.openeuler.org/openEuler-21.03/OS/$basearch/RPM-GPG-KEY-openEuler - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - > - **enabled** indicates whether to enable the software source repository. The value can be **1** or **0**. The default value is **1**, indicating that the software source repository is enabled. - > - **gpgkey** is the public key used to verify the signature. - - -#### Displays the Current Configuration -- To display the current configuration information, run the following command: - - ``` - dnf config-manager --dump - ``` - -- To display the configuration of a software source, query the repo id: - - ``` - dnf repolist - ``` - - Run the following command to display the software source configuration of the corresponding ID. In the command, _repository_ indicates the repository ID. - - ``` - dnf config-manager --dump repository - ``` - -- You can also use a global regular expression to display all matching configurations. - - ``` - dnf config-manager --dump glob_expression - ``` - - -### Creating a Local Software Repository - -To create a local repository of software sources, perform the following steps. - -1. Install the createrepo software package. Run the following command as the root user: - - ``` - dnf install createrepo - ``` - -2. Copy the required software packages to a directory, for example, /mnt/local\_repo/. -3. 
Run the following command to create a software source: - - ``` - createrepo /mnt/local_repo - ``` - - -### Adding, Enabling, and Disabling Software Sources - -This section describes how to add, enable, and disable the software source repository by running the **dnf config-manager** command. - -#### Adding Software Source -To define a new software repository, you can add the repository part to the /etc/dnf/dnf.conf file or add the .repo file to the /etc/yum.repos.d/ directory. You are advised to add the .repo file. Each software source has its own .repo file. The following describes how to add the .repo file. - -To add such a source to your system, run the following command as the user **root**. After the command is executed, the corresponding .repo file is generated in the **/etc/yum.repos.d/** directory. In the command, _repository\_url_ indicates the repo source address. For details, see [Table 2](#en-us_topic_0151921080_t2df9dceb0ff64b2f8db8ec5cd779792a). - -``` -dnf config-manager --add-repo repository_url -``` - -#### Enabling a Software Repository -To enable the software source, run the following command as the user **root**. In the command, _repository_ indicates the repository ID in the new .repo file. You can run the **dnf repolist** command to query the repository ID. - -``` -dnf config-manager --set-enable repository -``` - -You can also use a global regular expression to enable all matching software sources. In the command, _glob\_expression_ indicates the regular expression used to match multiple repository IDs. - -``` -dnf config-manager --set-enable glob_expression -``` - -#### Disabling a Software Repository -To disable a software source, run the following command as the user **root**: - -``` -dnf config-manager --set-disable repository -``` - -You can also use a global regular expression to disable all matching software sources. 
- -``` -dnf config-manager --set-disable glob_expression -``` - -## Managing Software Package - -The DNF enables you to query, install, and delete software packages. - -### Searching for Software Packages -You can search for the required RPM package by its name, abbreviation, or description. The command is as follows: - -``` -dnf search term -``` - -The following is an example: - -``` -$ dnf search httpd -========================================== N/S matched: httpd ========================================== -httpd.aarch64 : Apache HTTP Server -httpd-devel.aarch64 : Development interfaces for the Apache HTTP server -httpd-manual.noarch : Documentation for the Apache HTTP server -httpd-tools.aarch64 : Tools for use with the Apache HTTP Server -libmicrohttpd.aarch64 : Lightweight library for embedding a webserver in applications -mod_auth_mellon.aarch64 : A SAML 2.0 authentication module for the Apache Httpd Server -mod_dav_svn.aarch64 : Apache httpd module for Subversion server -``` - -### Listing Software Packages -To list all installed and available RPM packages in the system, run the following command: - -``` -dnf list all -``` - -To list a specific RPM package in the system, run the following command: - -``` -dnf list glob_expression... -``` - -The following is an example: - -``` -$ dnf list httpd -Available Packages -httpd.aarch64 2.4.34-8.h5.oe1 Local -``` - -### Displaying RPM Package Information -To view information about one or more RPM packages, run the following command: - -``` -dnf info package_name... -``` - -The following is a command example: - -``` -$ dnf info httpd -Available Packages -Name : httpd -Version : 2.4.34 -Release : 8.h5.oe1 -Arch : aarch64 -Size : 1.2 M -Repo : Local -Summary : Apache HTTP Server -URL : http://httpd.apache.org/ -License : ASL 2.0 -Description : The Apache HTTP Server is a powerful, efficient, and extensible - : web server. 
-``` - -### Installing an RPM Package -To install a software package and all its dependencies that have not been installed, run the following command as the user **root**: - -``` -dnf install package_name -``` - -You can also add software package names to install multiple software packages at the same time. Add the **strict=False** parameter to the /etc/dnf/dnf.conf configuration file and run the **dnf** command to add --setopt=strict=0. Run the following command as the user **root**: - -``` -dnf install package_name package_name... --setopt=strict=0 -``` - -The following is an example: - -``` -# dnf install httpd -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If the RPM package fails to be installed, see [Installation Failure Caused by Software Package Conflict, File Conflict, or Missing Software Package](./faqs.html#installation-failure-caused-by-software-package-conflict-file-conflict-or-missing-software-package). - -### Downloading Software Packages -To download the software package using the DNF, run the following command as the user **root**: - -``` -dnf download package_name -``` - -If you need to download the dependency packages that are not installed, add **\-\-resolve**. The command is as follows: - -``` -dnf download --resolve package_name -``` - -The following is an example: - -``` -# dnf download --resolve httpd -``` - -### Deleting a Software Package -To uninstall the software package and related dependent software packages, run the following command as the user **root**: - -``` -dnf remove package_name... -``` - -The following is an example: - -``` -# dnf remove totem -``` - -## Managing Software Package Groups - -A software package set is a group of software packages that serve a common purpose, for example, a system tool set. You can use the DNF to install or delete software package groups, improving operation efficiency. 
- -### Listing Software Package Groups -The summary parameter can be used to list the number of all installed software package groups, available groups, and available environment groups in the system. The command is as follows: - -``` -dnf groups summary -``` - -The following is an example: - -``` -# dnf groups summary -Last metadata expiration check: 0:11:56 ago on Sat 17 Aug 2019 07:45:14 PM CST. -Available Groups: 8 -``` - -To list all software package groups and their group IDs, run the following command: - -``` -dnf group list -``` - -The following is an example: - -``` -# dnf group list -Last metadata expiration check: 0:10:32 ago on Sat 17 Aug 2019 07:45:14 PM CST. -Available Environment Groups: - Minimal Install - Custom Operating System - Server -Available Groups: - Development Tools - Graphical Administration Tools - Headless Management - Legacy UNIX Compatibility - Network Servers - Scientific Support - Security Tools - System Tools - -``` - -### Displaying the Software Package Group Information -To list the mandatory and optional packages contained in a software package group, run the following command: - -``` -dnf group info glob_expression... -``` - -The following is an example of displaying the Development Tools information: - -``` -# dnf group info "Development Tools" -Last metadata expiration check: 0:14:54 ago on Wed 05 Jun 2019 08:38:02 PM CST. - -Group: Development Tools - Description: A basic development environment. - Mandatory Packages: - binutils - glibc-devel - make - pkgconf - pkgconf-m4 - pkgconf-pkg-config - rpm-sign - Optional Packages: - expect -``` - -### Installation Software Package Group -Each software package group has its own name and corresponding group ID. You can use the software package group name or its ID to install the software package. 
- -To install a software package group, run the following command as the user **root**: - -``` -dnf group install group_name -``` - -``` -dnf group install groupid -``` - -For example, to install the software package group of Development Tools, run the following command: - -``` -# dnf group install "Development Tools" -``` - -``` -# dnf group install development -``` - -### Deleting a Software Package Group -To uninstall a software package group, you can use the group name or ID to run the following command as the user **root**: - -``` -dnf group remove group_name -``` - -``` -dnf group remove groupid -``` - -For example, to delete the software package group of Development Tools, run the following command: - -``` -# dnf group remove "Development Tools" -``` - -``` -# dnf group remove development -``` - -## Check and Update - -You can use the DNF to check whether any software package in your system needs to be updated. You can use the DNF to list the software packages to be updated. You can choose to update all packages at a time or update only specified packages. - -### Checking for Update -To list all currently available updates, run the following command: - -``` -dnf check-update -``` - -The following is an example: - -``` -# dnf check-update -Last metadata expiration check: 0:02:10 ago on Sun 01 Sep 2019 11:28:07 PM CST. - -anaconda-core.aarch64 19.31.123-1.14 updates -anaconda-gui.aarch64 19.31.123-1.14 updates -anaconda-tui.aarch64 19.31.123-1.14 updates -anaconda-user-help.aarch64 19.31.123-1.14 updates -anaconda-widgets.aarch64 19.31.123-1.14 updates -bind-libs.aarch64 32:9.9.4-29.3 updates -bind-libs-lite.aarch64 32:9.9.4-29.3 updates -bind-license.noarch 32:9.9.4-29.3 updates -bind-utils.aarch64 32:9.9.4-29.3 updates -... 
-``` - -### Upgrade -To upgrade a single software package, run the following command as the user **root**: - -``` -dnf update package_name -``` - -For example, to upgrade the RPM package, run the following command: - -``` -# dnf update anaconda-gui.aarch64 -Last metadata expiration check: 0:02:10 ago on Sun 01 Sep 2019 11:30:27 PM CST. -Dependencies Resolved -================================================================================ - Package Arch Version Repository Size -================================================================================ -Updating: - anaconda-gui aarch64 19.31.123-1.14 updates 461 k - anaconda-core aarch64 19.31.123-1.14 updates 1.4 M - anaconda-tui aarch64 19.31.123-1.14 updates 274 k - anaconda-user-help aarch64 19.31.123-1.14 updates 315 k - anaconda-widgets aarch64 19.31.123-1.14 updates 748 k - -Transaction Summary -================================================================================ -Upgrade 5 Package - -Total download size: 3.1 M -Is this ok [y/N]: -``` - -Similarly, to upgrade a software package group, run the following command as the user **root**: - -``` -dnf group update group_name -``` - -### Updating All Packages and Their Dependencies -To update all packages and their dependencies, run the following command as the user **root**: - -``` -dnf update -``` diff --git a/docs/en/docs/Administration/using-the-kae.md b/docs/en/docs/Administration/using-the-kae.md deleted file mode 100644 index 6f696e68734a8b603d698a97d2d99c8cd9381d63..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/using-the-kae.md +++ /dev/null @@ -1,743 +0,0 @@ -# Using the KAE - - -- [Using the KAE](#using-the-kae) - - [Overview](#overview) - - [Application Scenarios](#application-scenarios) - - [Installing, Running, and Uninstalling the KAE](#installing-running-and-uninstalling-the-kae) - - [Installing the Accelerator Software Packages](#installing-the-accelerator-software-packages) - - [Upgrading the Accelerator 
Software Packages](#upgrading-the-accelerator-software-packages) - - [Uninstalling the Accelerator Software Packages](#uninstalling-the-accelerator-software-packages) - - [Querying Logs](#querying-logs) - - [Acceleration Engine Application](#acceleration-engine-application) - - [Example Code for the KAE](#example-code-for-the-kae) - - [Using the KAE in the OpenSSL Configuration File openssl.cnf](#using-the-kae-in-the-openssl-configuration-file-opensslcnf) - - [Troubleshooting](#troubleshooting) - - [Initialization Failure](#initialization-failure) - - [Failed to Identify Accelerator Devices After the Acceleration Engine Is Installed](#failed-to-identify-accelerator-devices-after-the-acceleration-engine-is-installed) - - [Failed to Upgrade the Accelerator Drivers](#failed-to-upgrade-the-accelerator-drivers) - - -## Overview - -Kunpeng Accelerator Engine \(KAE\) is a software acceleration library of openEuler, which provides hardware acceleration engine function on the Kunpeng 920 processor. The engine supports symmetric encryption, asymmetric encryption, and digital signature. It is ideal for accelerating SSL/TLS applications, and can significantly reduce processor consumption and improve processor efficiency. In addition, users can quickly migrate existing services through the standard OpenSSL interface. - -The KAE supports the following algorithms: - -- Digest algorithm SM3, which supports the asynchronous mode. -- Symmetric encryption algorithm SM4, which supports asynchronous, CTR, XTS, and CBC modes. - -- Symmetric encryption algorithm AES, which supports asynchronous, ECB, CTR, XTS, and CBC modes. -- Asymmetric algorithm RSA, which supports asynchronous mode, and key sizes 1024, 2048, 3072, and 4096. -- Key negotiation algorithm DH, which supports asynchronous mode, and key sizes 768, 1024, 1536, 2048, 3072, and 4096. - -## Application Scenarios - -The KAE applies to the following scenarios, as shown in [Table 1](#table11915824163418). 
- -**Table 1** Application scenarios - - - - - - - - - - - - - - - - - - - -

Scenario

-

Data

-

Big data

-

Stream data

-

Data encryption

-

Block data

-

Intelligent security protection

-

Video stream data

-

Web service

-

Handshake connections

-
- -## Installing, Running, and Uninstalling the KAE - - - -### Installing the Accelerator Software Packages - - - -#### Preparing for Installation - -##### Environment Requirements -- The accelerator engine is enabled on TaiShan 200 servers. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- You need to import the accelerator license. For details, see section "License Management" in the [TaiShan Rack Server iBMC \(V500 or Later\) User Guide](https://support.huawei.com/enterprise/en/doc/EDOC1100121685/426cffd9?idPath=7919749|9856522|21782478|8060757). ->- If the accelerator is used in the physical machine scenario, the SMMU must be disabled. For details, see the [TaiShan 200 Server BIOS Parameter Reference](https://support.huawei.com/enterprise/en/doc/EDOC1100088647). - -- CPU: Kunpeng 920 -- OS: openEuler-21.03-aarch64-dvd.iso - -##### KAE Software Description -**Table 2** RPM software packages of the KAE - - - - - - - - - - - - - - - - -

Software Package

-

Description

-

kae_driver-version number-1.OS type.aarch64.rpm

-

Accelerator driver, including the uacce.ko, hisi_qm.ko, hisi_sec2.ko, and hisi_hpre.ko kernel modules.

-

Support: SM3, SM4, AES, RSA, and DH algorithms.

-

libwd-version number-1.OS type.aarch64.rpm

-

Coverage: libwd.so dynamic link library.

-

It provides interfaces for the KAE.

-

libkae-version number-1.OS type.aarch64.rpm

-

Dependency: libwd RPM package.

-

Coverage: libkae.so dynamic library.

-

Support: SM3, SM4, AES, RSA, and DH algorithms.

-
- -#### Installing the Accelerator Software Package - -##### Prerequisites -- The remote SSH login tool has been installed on the local PC. -- The openEuler OS has been installed. -- The RPM tool is running properly. -- OpenSSL 1.1.1a or a later version has been installed. - - You can run the following commands to query the version number of OpenSSL: - - - openssl version - - -##### Procedure -1. Log in to the openEuler OS CLI as user **root**. -2. Create a directory for storing accelerator engine software packages. -3. Use SSH to copy all accelerator engine software package to the created directory. -4. In the directory, run the **rpm -ivh** command to install the accelerator engine software packages. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Install the **libwd** package first because the **libkae** package installation depends on the **libwd** package. - - ``` - rpm -ivh uacce*.rpm hisi*.rpm libwd-*.rpm libkae*.rpm - ``` - - ``` - Verifying... ################################# [100%] - Preparing... ################################# [100%] - checking installed modules - uacce modules start to install - Updating / installing... 
- 1:uacce-1.2.10-4.oe1 ################################# [ 14%] - uacce modules installed - 2:libwd-1.2.10-3.oe1 ################################# [ 29%] - 3:libkae-1.2.10-3.oe1 ################################# [ 43%] - checking installed modules - hisi_hpre modules start to install - 4:hisi_hpre-1.2.10-4.oe1 ################################# [ 57%] - hisi_hpre modules installed - checking installed modules - hisi_rde modules start to install - 5:hisi_rde-1.2.10-4.oe1 ################################# [ 71%] - hisi_rde modules installed - checking installed modules - hisi_sec2 modules start to install - 6:hisi_sec2-1.2.10-4.oe1 ################################# [ 86%] - hisi_sec2 modules installed - checking installed modules - hisi_zip modules start to install - 7:hisi_zip-1.2.10-4.oe1 ################################# [100%] - hisi_zip modules installed - ``` - -5. Run the **rpm -qa** command to check whether the accelerator software packages have been installed properly. Run the **rpm -ql** command to check whether files in the software packages are correct. 
The following is an example: - - ``` - rpm -qa|grep -E "hisi|uacce|libwd|libkae" - ``` - - ``` - hisi_rde-1.2.10-4.oe1.aarch64 - hisi_sec2-1.2.10-4.oe1.aarch64 - libkae-1.2.10-3.oe1.aarch64 - hisi_hpre-1.2.10-4.oe1.aarch64 - uacce-1.2.10-4.oe1.aarch64 - libwd-1.2.10-3.oe1.aarch64 - hisi_zip-1.2.10-4.oe1.aarch64 - ``` - - ``` - rpm -ql uacce hisi* libwd* libkae - ``` - - ``` - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/hisi_qm.ko - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/uacce.ko - /etc/modprobe.d/hisi_hpre.conf - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/hisi_hpre.ko - /etc/modprobe.d/hisi_rde.conf - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/hisi_rde.ko - /etc/modprobe.d/hisi_sec2.conf - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/hisi_sec2.ko - /etc/modprobe.d/hisi_zip.conf - /lib/modules/4.19.90-2003.4.0.0036.oe1.aarch64/extra/hisi_zip.ko - /usr/include/warpdrive/config.h - /usr/include/warpdrive/include/uacce.h - /usr/include/warpdrive/smm.h - /usr/include/warpdrive/wd.h - /usr/include/warpdrive/wd_bmm.h - /usr/include/warpdrive/wd_cipher.h - /usr/include/warpdrive/wd_comp.h - /usr/include/warpdrive/wd_dh.h - /usr/include/warpdrive/wd_digest.h - /usr/include/warpdrive/wd_rsa.h - /usr/lib64/libwd.so.1.2.10 - /usr/local/lib/engines-1.1/libkae.so.1.2.10 - ``` - -6. Restart the system or run commands to manually load the accelerator engine drivers to the kernel in sequence, and check whether the drivers are successfully loaded. - - ``` - # modprobe uacce - # lsmod | grep uacce - # modprobe hisi_qm - # lsmod | grep hisi_qm - # modprobe hisi_qm - # modprobe hisi_sec2 # Loads the hisi_sec2 driver to the kernel based on the configuration file in /etc/modprobe.d/hisi_sec2.conf. - # modprobe hisi_hpre # Loads the hisi_hpre driver to the kernel based on the configuration file in /etc/modprobe.d/hisi_hpre.conf. 
- ``` - - -##### Setting Environment Variables -Run the following command to export the environment variable \(If you have specified the installation directory, use the actual installation directory instead of **/usr/local**\): - -``` -export OPENSSL_ENGINES=/usr/local/lib/engines-1.1 -``` - -##### Performing the Post-Installation Check -Run the **rpm -qa** command to check whether the accelerator engine software packages are successfully installed. - -If the command output contains _software package name_**-**_version number_**-**, the software package is successfully installed. The following is an example: - -``` -rpm -qa|grep -E "hisi|uacce|libwd|libkae" -``` - -``` -hisi_rde-1.2.10-4.oe1.aarch64 -hisi_sec2-1.2.10-4.oe1.aarch64 -libkae-1.2.10-3.oe1.aarch64 -hisi_hpre-1.2.10-4.oe1.aarch64 -uacce-1.2.10-4.oe1.aarch64 -libwd-1.2.10-3.oe1.aarch64 -hisi_zip-1.2.10-4.oe1.aarch64 -``` - -#### Performing Required Operations After Installation - -##### Testing the OpenSSL Accelerator Engine - -You can run the following commands to test some accelerator functions. - -- Use the OpenSSL software algorithm to test the RSA performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed rsa2048 - ... - sign verify sign/s verify/s - rsa 2048 bits 0.001384s 0.000035s 724.1 28365.8. - ``` - -- Use the KAE to test the RSA performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -engine kae rsa2048 - .... - sign verify sign/s verify/s - rsa 2048 bits 0.000355s 0.000022s 2819.0 45478.4 - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->\#After KAE acceleration, the signature performance is improved from 724.1 sign/s to 2819 sign/s. - -- Use the OpenSSL software algorithm to test the asynchronous RSA performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -async_jobs 36 rsa2048 - .... 
- sign verify sign/s verify/s - rsa 2048 bits 0.001318s 0.000032s 735.7 28555 - ``` - -- Use the KAE to test the asynchronous RSA performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -engine kae -elapsed -async_jobs 36 rsa2048 - .... - sign verify sign/s verify/s - rsa 2048 bits 0.000018s 0.000009s 54384.1 105317.0 - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->\#After KAE acceleration, the asynchronous RSA signature performance is improved from 735.7 sign/s to 54384.1 sign/s. - -- Use the OpenSSL software algorithm to test the performance of the SM4 CBC mode. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -evp sm4-cbc - You have chosen to measure elapsed time instead of user CPU time. - .... - Doing sm4-cbc for 3s on 10240 size blocks: 2196 sm4-cbc's in 3.00s .... - type 51200 bytes 102400 bytes1048576 bytes2097152 bytes4194304 bytes8388608 bytes - sm4-cbc 82312.53k 85196.80k 85284.18k 85000.85k 85284.18k 85261.26k - ``` - -- Use the KAE to test the SM4 CBC mode performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -engine kae -evp sm4-cbc - engine "kae" set. - You have chosen to measure elapsed time instead of user CPU time. - ... - Doing sm4-cbc for 3s on 1048576 size blocks: 11409 sm4-cbc's in 3.00s - ... - type 51200 bytes 102400 bytes1048576 bytes2097152 bytes4194304 bytes8388608 bytes - sm4-cbc 383317.33k 389427.20k 395313.15k 392954.73k 394264.58k 394264.58k - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->After KAE acceleration, the SM4 CBC mode performance is improved from 82312.53 kbit/s to 383317.33 kbit/s when the input data block size is 8 MB. - -- Use the OpenSSL software algorithm to test the SM3 mode performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -evp sm3 - You have chosen to measure elapsed time instead of user CPU time. - Doing sm3 for 3s on 102400 size blocks: 1536 sm3's in 3.00s - .... 
- type 51200 bytes 102400 bytes1048576 bytes2097152 bytes4194304 bytes8388608 bytes - sm3 50568.53k 52428.80k 52428.80k 52428.80k 52428.80k 52428.80k - ``` - -- Use the KAE to test the SM3 mode performance. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -engine kae -evp sm3 - engine "kae" set. - You have chosen to measure elapsed time instead of user CPU time. - Doing sm3 for 3s on 102400 size blocks: 19540 sm3's in 3.00s - .... - type 51200 bytes 102400 bytes 1048576 bytes 2097152 bytes 4194304 bytes 8388608 bytes - sm3 648243.20k 666965.33k 677030.57k 678778.20k 676681.05k 668292.44k - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->After KAE acceleration, the SM3 algorithm performance is improved from 52428.80 kbit/s to 668292.44 kbit/s when the input data block size is 8 MB. - -- Use the OpenSSL software algorithm to test the asynchronous performance of the AES algorithm in CBC mode. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -evp aes-128-cbc -async_jobs 4 - You have chosen to measure elapsed time instead of user CPU time. - Doing aes-128-cbc for 3s on 51200 size blocks: 65773 aes-128-cbc's in 3.00s - Doing aes-128-cbc for 3s on 102400 size blocks: 32910 aes-128-cbc's in 3.00s - .... - type 51200 bytes 102400 bytes1048576 bytes2097152 bytes4194304 bytes8388608 bytes - aes-128-cbc 1122525.87k 1123328.00k 1120578.22k 1121277.27k 1119879.17k 1115684.86k - ``` - -- Use the KEA engine to test the asynchronous performance of the AES algorithm in CBC mode. - - ``` - linux-rmw4:/usr/local/bin # ./openssl speed -elapsed -evp aes-128-cbc -async_jobs 4 -engine kae - engine "kae" set. - You have chosen to measure elapsed time instead of user CPU time. - Doing aes-128-cbc for 3s on 51200 size blocks: 219553 aes-128-cbc's in 3.00s - Doing aes-128-cbc for 3s on 102400 size blocks: 117093 aes-128-cbc's in 3.00s - .... 
- type 51200 bytes 102400 bytes1048576 bytes2097152 bytes4194304 bytes8388608 bytes - aes-128-cbc 3747037.87k 3996774.40k 1189085.18k 1196774.74k 1196979.11k 1199570.94k - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- The AES algorithm supports only asynchronous usage when the data length is 256 KB or less. ->- After KAE acceleration, the AES algorithm performance is improved from 1123328.00 kbit/s to 3996774.40 kbit/s when the input data block size is 100 KB. - -### Upgrading the Accelerator Software Packages - -#### Scenario -You can run the **rpm -Uvh** command to upgrade the accelerator software. - -#### Procedure -1. Download the latest accelerator engine software packages from the openEuler community. -2. Use SSH to log in to the Linux CLI as user **root**. -3. Save the downloaded software packages to a directory. -4. In the directory, run the **rpm -Uvh** command to upgrade the accelerator driver package and engine library package. The following is an example: - - The command and output are as follows: - - ![](./figures/en-us_image_0231143189.png) - - ![](./figures/en-us_image_0231143191.png) - -5. Run the **rpm -qa** command to check whether the upgrade is successful. Ensure that the queried version is the latest version. - - ![](./figures/en-us_image_0231143193.png) - - ![](./figures/en-us_image_0231143195.png) - -6. Restart the system or run the following commands to manually uninstall the drivers of the earlier version, load the drivers of the latest version, and check whether the new drivers are successfully loaded. - - ``` - Uninstall the existing drivers. - # lsmod | grep uacce - uacce 262144 3 hisi_hpre,hisi_sec2,hisi_qm - # - # rmmod hisi_hpre - # rmmod hisi_sec2 - # rmmod hisi_qm - # rmmod uacce - # lsmod | grep uacce - # - Load the new drivers.# modprobe uacce - # modprobe hisi_qm# modprobe hisi_sec2 #Loads the hisi_sec2 driver to the kernel based on the configuration file in /etc/modprobe.d/hisi_sec2.conf. 
- # modprobe hisi_hpre #Loads the hisi_hpre driver to the kernel based on the configuration file in /etc/modprobe.d/hisi_hpre.conf. - # lsmod | grep uacce - uacce 36864 3 hisi_sec2,hisi_qm,hisi_hpre - ``` - - -### Uninstalling the Accelerator Software Packages - -#### Scenario -You do not need the accelerator engine software or you want to install new accelerator engine software. - -#### Procedure -1. Use SSH to log in to the Linux CLI as user **root**. -2. Restart the system or run commands to manually uninstall the accelerator drivers loaded to the kernel, and check whether the drivers are successfully uninstalled. - - ``` - # lsmod | grep uacce - uacce 36864 3 hisi_sec2,hisi_qm,hisi_hpre - # rmmod hisi_hpre - # rmmod hisi_sec2 - # rmmod hisi_qm - # rmmod uacce - # lsmod | grep uacce - # - ``` - -3. Run the **rpm -e** command to uninstall the accelerator engine software packages. The following is an example: - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Due to the dependency relationships, the **libkae** package must be uninstalled before the **libwd** package. - - ![](./figures/en-us_image_0231143196.png) - - ![](./figures/en-us_image_0231143197.png) - -4. Run the **rpm -qa |grep** command to check whether the uninstallation is successful. - - ![](./figures/en-us_image_0231143198.png) - - -## Querying Logs - -[Table 3](#table52821836) lists log information related to the accelerator engine. - -**Table 3** Log information - - - - - - - - - - - - - - - - -

Directory

-

File

-

Description

-

/var/log/

-

kae.log

-

By default, the log level of the OpenSSL engine log is error. To set the log level, perform the following procedure:

-
  1. Run export KAE_CONF_ENV=/var/log/.
  2. Create the kae.cnf file in /var/log/.
  3. In the kae.cnf file, configure the content as follows:

    [LogSection]

    -

    debug_level=error #Value: none, error, info, warning or debug

    -
-
NOTE:

In normal cases, you are advised not to enable the info or debug log level. Otherwise, the accelerator performance will deteriorate.

-
-

/var/log/

-

messages/syslog

-
  • Kernel logs are stored in the /var/log/messages directory.
-
NOTE:

Alternatively, you can run the dmesg > /var/log/dmesg.log command to collect driver and kernel logs.

-
-
- -## Acceleration Engine Application - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If you have not purchased the engine license, you are advised not to use the KAE engine to invoke the corresponding algorithms. Otherwise, the performance of the OpenSSL encryption algorithm may be affected. - - - -### Example Code for the KAE - -``` -#include - -#include - -/* OpenSSL headers */ - -#include - -#include - -#include - -#include - -int main(int argc, char **argv) - -{ - - /* Initializing OpenSSL */ - - SSL_load_error_strings(); - - ERR_load_BIO_strings(); - - OpenSSL_add_all_algorithms(); - - /*You can use ENGINE_by_id Function to get the handle of the Huawei Accelerator Engine*/ - - ENGINE *e = ENGINE_by_id("kae"); - - /* Enable the accelerator asynchronization function. This parameter is optional. The value 0 indicates disabled, and the value 1 indicates enabled. The asynchronous function is enabled by default. */ - - ENGINE_ctrl_cmd_string(e, "KAE_CMD_ENABLE_ASYNC", "1", 0) - - ENGINE_init(e); - - RSA*rsa=RSA_new_method(e);#Specify the engine for RSA encryption and decryption. - - /*The user code*/ - - ...... - -; - - ENGINE_free(e); - -; - -} -``` - -### Using the KAE in the OpenSSL Configuration File openssl.cnf - -Create the **openssl.cnf** file and add the following configuration information to the file: - -``` -openssl_conf=openssl_def -[openssl_def] -engines=engine_section -[engine_section] -kae=kae_section -[kae_section] -engine_id=kae -dynamic_path=/usr/local/lib/engines-1.1/kae.so -KAE_CMD_ENABLE_ASYNC=1 #The value 0 indicates that the asynchronous function is disabled. The value 1 indicates that the asynchronous function is enabled. The asynchronous function is enabled by default. -default_algorithms=ALL -init=1 -``` - -Export the environment variable **OPENSSL\_CONF**. 
- -``` -export OPENSSL_CONF=/home/app/openssl.cnf #Path for storing the openssl.cnf file -``` - -The following is an example of the OpenSSL configuration file: - -``` -#include - -#include - -/* OpenSSL headers */ - -#include - -#include - -#include - -#include - -int main(int argc, char **argv) - -{ - - /* Initializing OpenSSL */ - - SSL_load_error_strings(); - - ERR_load_BIO_strings(); - -#Load openssl configure - -OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CONFIG, NULL); OpenSSL_add_all_algorithms(); - - /*You can use ENGINE_by_id Function to get the handle of the Huawei Accelerator Engine*/ - - ENGINE *e = ENGINE_by_id("kae"); - - /*The user code*/ - - ...... - -; - - ENGINE_free(e); - -; -} -``` - -## Troubleshooting - - - -### Initialization Failure - -#### Symptom -The accelerator engine is not completely loaded due to an initialization failure. - -#### Solution -1. Check whether the accelerator drivers are loaded successfully. Specifically, run the **lsmod** command to check whether uacce.ko, qm.ko, sgl.ko, hisi\_sec2.ko, hisi\_hpre.ko, hisi\_zip.ko, and hisi\_rde.ko exist. - - ``` - # lsmod | grep uacce - uacce 262144 2 hisi_hpre,hisi_qm,hisi_sec2,hisi_zip,hisi_rde - ``` - -2. Check whether the accelerator engine library exists in **/usr/lib64** \(directory for RPM installation\) or **/usr/local/lib** \(directory for source code installation\) and the OpenSSL installation directory, and check whether the correct soft link is established. - - ``` - [root@localhost home]# ll /usr/local/lib/engines-1.1/ |grep kae - #Check whether the KAE has been correctly installed and whether a soft link has been established. If yes, the displayed information is as follows: - # ll /usr/local/lib/engines-1.1/ |grep kae - lrwxrwxrwx. 1 root root 22 Nov 12 02:33 kae.so -> kae.so.1.0.1 - lrwxrwxrwx. 1 root root 22 Nov 12 02:33 kae.so.0 -> kae.so.1.0.1 - -rwxr-xr-x. 
1 root root 112632 May 25 2019 kae.so.1.0.1 - [[root@localhost home]# - [root@localhost home]# ll /usr/lib64/ | grep libwd - #Check whether libwd has been correctly installed and whether a soft link has been established. If yes, the displayed information is as follows: - lrwxrwxrwx. 1 root root 14 Nov 12 02:33 libwd.so -> libwd.so.1.0.1 - lrwxrwxrwx. 1 root root 14 Nov 12 02:33 libwd.so.0 -> libwd.so.1.0.1 - -rwxr-xr-x. 1 root root 137120 May 25 2019 libwd.so.1.0.1 - [root@localhost home]# - ``` - -3. Check whether the path of the OpenSSL engine library can be exported by running the **export** command. - - ``` - # echo $OPENSSL_ENGINES - # export OPENSSL_ENGINES=/usr/local/lib/engines-1.1 - # echo $OPENSSL_ENGINES - /usr/local/lib/engines-1.1 - ``` - - -### Failed to Identify Accelerator Devices After the Acceleration Engine Is Installed - -#### Symptom -After the acceleration engine is installed, the accelerator devices cannot be identified. - -#### Solution -1. Check whether the device exists in the virtual file system. Normally, the following accelerator devices are displayed: - - ``` - # ls -al /sys/class/uacce/ - total 0 - lrwxrwxrwx. 1 root root 0 Nov 14 03:45 hisi_hpre-2 -> ../../devices/pci0000:78/0000:78:00.0/0000:79:00.0/uacce/hisi_hpre-2 - lrwxrwxrwx. 1 root root 0 Nov 14 03:45 hisi_hpre-3 -> ../../devices/pci0000:b8/0000:b8:00.0/0000:b9:00.0/uacce/hisi_hpre-3 - lrwxrwxrwx. 1 root root 0 Nov 17 22:09 hisi_rde-4 -> ../../devices/pci0000:78/0000:78:01.0/uacce/hisi_rde-4 - lrwxrwxrwx. 1 root root 0 Nov 17 22:09 hisi_rde-5 -> ../../devices/pci0000:b8/0000:b8:01.0/uacce/hisi_rde-5 - lrwxrwxrwx. 1 root root 0 Nov 14 08:39 hisi_sec-0 -> ../../devices/pci0000:74/0000:74:01.0/0000:76:00.0/uacce/hisi_sec-0 - lrwxrwxrwx. 1 root root 0 Nov 14 08:39 hisi_sec-1 -> ../../devices/pci0000:b4/0000:b4:01.0/0000:b6:00.0/uacce/hisi_sec-1 - lrwxrwxrwx. 1 root root 0 Nov 17 22:09 hisi_zip-6 -> ../../devices/pci0000:74/0000:74:00.0/0000:75:00.0/uacce/hisi_zip-6 - lrwxrwxrwx. 
1 root root 0 Nov 17 22:09 hisi_zip-7 -> ../../devices/pci0000:b4/0000:b4:00.0/0000:b5:00.0/uacce/hisi_zip-7 - ``` - -2. If you want to use the HPRE device but the device is not found in [1](#li1760055514614), check whether the accelerator software is correctly installed by referring to [Failed to Upgrade the Accelerator Drivers](#failed-to-upgrade-the-accelerator-drivers). -3. If the accelerator software is correctly installed, run the **lspci** command to check whether the physical device exists. - - ``` - # lspci | grep HPRE - 79:00.0 Network and computing encryption device: Huawei Technologies Co., Ltd. HiSilicon HPRE Engine (rev 21) - b9:00.0 Network and computing encryption device: Huawei Technologies Co., Ltd. HiSilicon HPRE Engine (rev 21) - ## lspci | grep SEC - 76:00.0 Network and computing encryption device: Huawei Technologies Co., Ltd. HiSilicon SEC Engine (rev 21) - b6:00.0 Network and computing encryption device: Huawei Technologies Co., Ltd. HiSilicon SEC Engine (rev 21) - ## lspci | grep RDE - 78:01.0 RAID bus controller: Huawei Technologies Co., Ltd. HiSilicon RDE Engine (rev 21) - b8:01.0 RAID bus controller: Huawei Technologies Co., Ltd. HiSilicon RDE Engine (rev 21) - ## lspci | grep ZIP - 75:00.0 Processing accelerators: Huawei Technologies Co., Ltd. HiSilicon ZIP Engine (rev 21) - b5:00.0 Processing accelerators: Huawei Technologies Co., Ltd. HiSilicon ZIP Engine (rev 21) - # - ``` - -4. If no physical device is found in [3](#li1560012551369), perform the following operations: - - Check whether the accelerator license has been imported. If no, import the accelerator license. For details, see "License Management" in the [TaiShan Rack Server iBMC \(V500 or Later\) User Guide](https://support.huawei.com/enterprise/en/doc/EDOC1100121685/426cffd9?idPath=7919749|9856522|21782478|8060757). After the accelerator license is imported, power off and restart the BMC to enable the license. 
- - Check whether the BMC and BIOS versions support the accelerator feature. - - -### Failed to Upgrade the Accelerator Drivers - -#### Symptom -After the accelerator drivers are upgraded, the driver version is not changed after the system is restarted. - -#### Possible Cause -Before the accelerator drivers are upgraded, the system upgrades other driver packages. These driver packages may update the boot file system initramfs, and update the accelerator drivers to initramfs before upgrade. For example, if the NIC driver is updated or initramfs is manually updated, the system loads the accelerator drivers from initramfs first during restart. - -#### Solution -After the accelerator drivers are upgraded, run the **dracut \-\-force** command to update initramfs again. diff --git a/docs/en/docs/Administration/viewing-system-information.md b/docs/en/docs/Administration/viewing-system-information.md deleted file mode 100644 index 729b2eb5589a177ef03c822640f4f0f3ecc21f17..0000000000000000000000000000000000000000 --- a/docs/en/docs/Administration/viewing-system-information.md +++ /dev/null @@ -1,42 +0,0 @@ -# Viewing System Information - -- Run the following command to view the system information: - - ``` - $ cat /etc/os-release - ``` - - For example, the command and output are as follows: - - ``` - $ cat /etc/os-release - NAME="openEuler" - VERSION="21.03" - ID="openEuler" - VERSION_ID="21.03" - PRETTY_NAME="openEuler 21.03" - ANSI_COLOR="0;31" - ``` - - -- View system resource information. 
- - Run the following command to view the CPU information: - - ``` - $ lscpu - ``` - - Run the following command to view the memory information: - - ``` - $ free - ``` - - Run the following command to view the disk information: - - ``` - $ fdisk -l - ``` - - diff --git a/docs/en/docs/ApplicationDev/FAQ.md b/docs/en/docs/ApplicationDev/FAQ.md deleted file mode 100644 index 8b355eeed2a4e1db834a8383829dfebc77752f89..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/FAQ.md +++ /dev/null @@ -1,27 +0,0 @@ -# FAQ - - - -- [FAQ](#faq) - - [The self-compilation of some applications that depend on the **java-devel** package fails.](#部分依赖java-devel的应用程序自编译失败) - - - -## The self-compilation of some applications that depend on the **java-devel** package fails. - -### Symptom - -The self-compilation of some applications that depend on java-devel fails when the rpmbuild command is executed. - -### Cause Analysis - -To provide OpenJDK features that are updated and compatible with Java applications, the openEuler provides OpenJDK of multiple versions, such as OpenJDK 1.8.0 and OpenJDK 11. The compilation of some applications depends on the **java-devel** package. When the **java-devel** package is installed, the system installs java-11-openjdk of a later version by default. As a result, the compilation of these applications fails. 
- -### Solution - -You need to run the following command to install java-1.8.0-openjdk and then run the **rpmbuild** command to perform self-compilation: - -``` -# yum install java-1.8.0-openjdk - -``` \ No newline at end of file diff --git a/docs/en/docs/ApplicationDev/application-development.md b/docs/en/docs/ApplicationDev/application-development.md deleted file mode 100644 index 7c67238c6ced29f21db9c79f6f75b65f2581cf3f..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/application-development.md +++ /dev/null @@ -1,95 +0,0 @@ -# Application Development Guide - -This document describes the common tools used for application development and guides users to develop applications based on openEuler. - -## Overview - -This document describes the following four parts to guide users to use openEuler and develop code based on openEuler. - -- Install and use the GCC compiler in the openEuler operating system \(OS\), and complete the development, compilation, and execution of simple code. -- In the openEuler OS, use the JDK built-in tool to compile and execute code. -- Install IntelliJ IDEA in the openEuler OS for Java development. -- Create an RPM package locally or using the Open Build Service \(OBS\). - -## Intended Audience - -This document is intended for all users who use the openEuler OS for code development. You are expected to have the following experience or capabilities: - -- Have basic knowledge of the Linux OS. -- Know how to use Linux command lines. - -## Symbol Conventions - -The symbols that may be found in this document are defined as follows. - - - - - - - - - - - - - - -

Symbol

-

Description

-

![](./figures/en-us_image_0229243712.png)

-

Indicates a potentially hazardous situation which, if not avoided, could result in equipment damage, data loss, performance deterioration, or unanticipated results.

-

NOTICE is used to address practices not related to personal injury.

-

![](./figures/en-us_image_0229243671.png)

-

Supplements the important information in the main text.

-

NOTE is used to address information not related to personal injury, equipment damage, and environment deterioration.

-
- -## Command Conventions - -**Table 1** Command conventions - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Format

-

Description

-

Boldface

-

Command keywords, which remain unchanged in the commands, are in boldface.

-

Italic

-

Command parameters, which are replaced with actual values in the commands, are in italic.

-

[ ]

-

Items in square brackets are optional.

-

{ x | y | ... }

-

Optional items are grouped in braces and separated by vertical bars. One item is selected.

-

[ x | y | ... ]

-

Optional items are grouped in brackets and separated by vertical bars. One item is selected or no item is selected.

-

{ x | y | ... }\*

-

Optional items are grouped in braces and separated by vertical bars. A minimum of one or a maximum of all can be selected.

-

[ x | y | ... ]\*

-

Optional items are grouped in brackets and separated by vertical bars. One or more items are selected or no item is selected.

-
diff --git a/docs/en/docs/ApplicationDev/building-an-rpm-package.md b/docs/en/docs/ApplicationDev/building-an-rpm-package.md deleted file mode 100644 index 42d917c3824dcdefd49b0106af71fd6704d01d7c..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/building-an-rpm-package.md +++ /dev/null @@ -1,818 +0,0 @@ -# Building an RPM Package - -This section describes how to build an RPM software package on a local PC or using OBS. For details, see [https://gitee.com/openeuler/community/blob/master/zh/contributors/packaging.md](https://gitee.com/openeuler/community/blob/master/zh/contributors/packaging.md). - - -- [Building an RPM Package](#building-an-rpm-package) - - [Packaging Description](#packaging-description) - - [Principles](#principles) - - [Packaging Process](#packaging-process) - - [Packaging Options](#packaging-options) - - [Building an RPM Package Locally](#building-an-rpm-package-locally) - - [Setting Up the Development Environment](#setting-up-the-development-environment) - - [Creating a Hello World RPM Package](#creating-a-hello-world-rpm-package) - - [Building an RPM Package Using the OBS](#building-an-rpm-package-using-the-obs) - - [OBS Overview](#obs-overview) - - [Building an RPM Software Package Online](#building-an-rpm-software-package-online) - - [Building a Software Package Using OSC](#building-a-software-package-using-osc) - - - - -## Packaging Description - -### Principles -During RPM packaging, the source code needs to be compiled. The compiled configuration files and binary command files need to be placed in proper positions. The RPM packages need to be tested as required. A workspace is required for these operations. The **rpmbuild** command uses a set of standard workspaces. - -``` -$ rpmdev-setuptree -``` - -The **rpmdev-setuptree** command is used to install rpmdevtools. 
After the command is executed, the **rpmbuild** folder is generated in the **/root** directory \(or the **/home/**_username_ directory for non-root users\). The directory structure is as follows: - -``` -$ tree rpmbuild -rpmbuild -├── BUILD -├── RPMS -├── SOURCES -├── SPECS -└── SRPMS -``` - -The content is described as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Content

-

Macro Code

-

Name

-

Function

-

~/rpmbuild/BUILD

-

%_builddir

-

Build directory.

-

The source code package is decompressed and compiled in a subdirectory of the directory.

-

~/rpmbuild/RPMS

-

%_rpmdir

-

Standard RPM package directory.

-

The binary RPM package is generated and stored in this directory.

-

~/rpmbuild/SOURCES

-

%_sourcedir

-

Source code directory.

-

The source code package (for example, .tar package) and all patches are stored in this directory.

-

~/rpmbuild/SPECS

-

%_specdir

-

Spec file directory.

-

The RPM package configuration file (.spec) is stored in this directory.

-

~/rpmbuild/SRPMS

-

%_srcrpmdir

-

Source code RPM package directory.

-

The source code RPM package (SRPM) is stored in this directory.

-
- -The **\~/rpmbuild/SPECS** directory contains the configuration file of the RPM package, which is the drawing of the RPM package. This file tells the **rpmbuild** command how to build the RPM package. The **Macro Code** column contains the corresponding directories in the .spec file, which is similar to the macro or global variable in the programming language. - -### Packaging Process -The packaging process is as follows: - -1. Place the source code in **%\_sourcedir**. -2. Compile the source code in **%\_builddir**. Generally, the source code is compressed and needs to be decompressed first. -3. Install the RPM package. The installation is similar to pre-assembling the software package. Copy the contents \(such as binary files, configuration files, and man files\) that should be contained in the software package to **%\_buildrootdir** and assemble the contents based on the actual directory structure after installation. For example, if binary commands are stored in **/usr/bin**, copy the directory structure to **%\_buildrootdir**. -4. Perform necessary configurations, such as preparations before installation and cleanup after installation. These are configured in the SPEC file to tell the **rpmbuild** command how to build. -5. Check whether the software is running properly. -6. The generated RPM package is stored in **%\_rpmdir**, and the source code package is stored in **%\_srpmdir**. - -In the SPEC file, each phase is described as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Phase

-

Directory to Be Read

-

Directory to Which Data Is Written

-

Action

-

%prep

-

%_sourcedir

-

%_builddir

-

Read the source code and patch in the %_sourcedir directory. Then, decompress the source code to the %_builddir subdirectory and apply all patches.

-

%build

-

%_builddir

-

%_builddir

-

Compile files in the %_builddir build directory. Run a command similar to ./configure && make.

-

%install

-

%_builddir

-

%_buildrootdir

-

Read files in the %_builddir build directory and install them to the %_buildrootdir directory. These are the files that will be installed on the system when the RPM package is installed.

-

%check

-

%_builddir

-

%_builddir

-

Check whether the software is running properly. Run a command similar to make test.

-

bin

-

%_buildrootdir

-

%_rpmdir

-

Read files in the %_buildrootdir final installation directory to create RPM packages in the %_rpmdir directory. In this directory, RPM packages of different architectures are stored in different subdirectories. The noarch directory stores RPM packages applicable to all architectures. These RPM files are the RPM packages that are finally installed by users.

-

src

-

%_sourcedir

-

%_srcrpmdir

-

Create the source code RPM package (SRPM for short, with the file name extension .src.rpm) and save it to the %_srcrpmdir directory. The SRPM package is usually used to review and upgrade software packages.

-
- -### Packaging Options - -Run the **rpmbuild** command to build the software package. The **rpmbuild** command can be used to build software packages by building .spec, .tar, and source files. - -The format of the **rpmbuild** command is rpmbuild \[_option_...\] - -[Table 1](#table1342946175212) describes the common rpmbuild packaging options. - -**Table 1** rpmbuild Packaging Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

option Value

-

Description

-

-bp specfile

-

Starts build from the %prep phase of the specfile (decompresses the source code package and applies the patches).

-

-bc specfile

-

Starts build from the %build phase of the specfile.

-

-bi specfile

-

Starts build from the %install phase of the specfile.

-

-bl specfile

-

Starts check from the %files phase of the specfile.

-

-ba specfile

-

Uses the specfile to build the source code package and binary package.

-

-bb specfile

-

Uses the specfile to build the binary package.

-

-bs specfile

-

Uses the specfile to build the source code package.

-

-rp sourcefile

-

Starts build from the %prep phase of the sourcefile (decompresses the source code package and applies the patches).

-

-rc sourcefile

-

Starts build from the %build phase of the sourcefile.

-

-ri sourcefile

-

Starts build from the %install phase of the sourcefile.

-

-rl sourcefile

-

Starts build from the %files phase of the sourcefile.

-

-ra sourcefile

-

Uses the sourcefile to build the source code package and binary package.

-

-rb sourcefile

-

Uses the sourcefile to build the binary package.

-

-rs sourcefile

-

Uses the sourcefile to build the source code package.

-

-tp tarfile

-

Starts build from the %prep phase of the tarfile (decompresses the source code package and applies the patches).

-

-tc tarfile

-

Starts build from the %build phase of the tarfile.

-

-ti tarfile

-

Starts build from the %install phase of the tarfile.

-

-ta tarfile

-

Uses the tarfile to build the source code package and binary package.

-

-tb tarfile

-

Uses the tarfile to build the binary package.

-

-ts tarfile

-

Uses the tarfile to build the source code package.

-

\-\-buildroot=DIRECTORY

-

During the build, uses DIRECTORY to override the default build root directory.

-

\-\-clean

-

Deletes the files in the BUILD directory.

-

\-\-nobuild

-

No actual build steps are performed. It can be used to test the .spec file.

-

\-\-noclean

-

Skips the %clean phase of the .spec file (even if it does exist).

-

\-\-nocheck

-

Skips the %check phase of the .spec file (even if it does exist).

-

\-\-dbpath DIRECTORY

-

Uses the database in DIRECTORY instead of the default directory /var/lib/rpm.

-

\-\-root DIRECTORY

-

Sets DIRECTORY to the highest level. The default value is /, indicating the highest level.

-

\-\-rebuild sourcefile

-

Installs the specified source code package sourcefile, that is, starts preparation, compilation, and installation of the source code package.

-

\-\-recompile sourcefile

-

Builds a new binary package based on \-\-rebuild. When the build is complete, the build directory, source code, and .spec file are deleted.

-

The deletion effect is the same as that of \-\-clean.

-

-?, \-\-help

-

Displays detailed help information.

-

\-\-version

-

Displays detailed version information.

-
- -## Building an RPM Package Locally - -This section uses an example to describe how to build an RPM software package locally. - - - -### Setting Up the Development Environment - -#### Prerequisites -You have obtained the **root** permission, and have configured a repo source for openEuler. - -#### Procedure -You can use the DNF tool to install rpmdevtools, including the **rpm-build** command and related dependencies \(such as make and gdb\). Run the following command: - -``` -# dnf install rpmdevtools* -``` - -### Creating a Hello World RPM Package - -The following uses the packaging process of the GNU Hello World project as an example. The package contains the most common peripheral components related to the typical Free and Open Source Software \(FOSS\) project, including the configuration, compilation, and installation environments, documents, and internationalization \(i18n\) information. - - - -#### Obtaining the Source Code - -Run the following command to download the source code of the official example: - -``` -$ cd ~/rpmbuild/SOURCES -$ wget http://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz -``` - -#### Editing the SPEC File - -Run the following command to create the .spec file in the **~/rpmbuild/SPECS** directory: - -``` -$ cd ~/rpmbuild/SPECS -$ vi hello.spec -``` - -Write the corresponding content to the file and save the file. The following is an example of the file content. Modify the corresponding fields based on the actual requirements. - -``` -Name: hello -Version: 2.10 -Release: 1%{?dist} -Summary: The "Hello World" program from GNU -Summary(zh_CN): GNU Hello World program -License: GPLv3+ -URL: http://ftp.gnu.org/gnu/hello -Source0: http://ftp.gnu.org/gnu/hello/%{name}-%{version}.tar.gz - -BuildRequires: gettext -Requires(post): info -Requires(preun): info - -%description -The "Hello World" program, done with all bells and whistles of a proper FOSS -project, including configuration, build, internationalization, help files, etc. 
- -%description -l zh_CN -The Hello World program contains all parts required by the FOSS project, including configuration, build, i18n, and help files. - -%prep -%setup -q - -%build -%configure -make %{?_smp_mflags} - -%install -make install DESTDIR=%{buildroot} -%find_lang %{name} -rm -f %{buildroot}/%{_infodir}/dir - -%post -/sbin/install-info %{_infodir}/%{name}.info %{_infodir}/dir || : - -%preun -if [ $1 = 0 ] ; then -/sbin/install-info --delete %{_infodir}/%{name}.info %{_infodir}/dir || : -fi - -%files -f %{name}.lang -%doc AUTHORS ChangeLog NEWS README THANKS TODO -%license COPYING -%{_mandir}/man1/hello.1.* -%{_infodir}/hello.info.* -%{_bindir}/hello - -%changelog -* Thu Dec 26 2019 Your Name - 2.10-1 -- Update to 2.10 -* Sat Dec 3 2016 Your Name - 2.9-1 -- Update to 2.9 -``` - -- The **Name** tag indicates the software name, the **Version** tag indicates the version number, and the **Release** tag indicates the release number. -- The **Summary** tag is a brief description. The first letter of the tag must be capitalized to prevent the rpmlint tool \(packaging check tool\) from generating alarms. -- The **License** tag describes the protocol version of the software package. The packager is responsible for checking the license status of the software, which can be implemented by checking the source code or license file or communicating with the author. -- The **Group** tag is used to classify software packages by **/usr/share/doc/rpm-/GROUPS**. Currently, this tag has been discarded. However, the VIM template still has this tag. You can delete it. However, adding this tag does not affect the system. The **%changelog** tag should contain the log of changes made for each release, especially the description of the upstream security/vulnerability patches. The **%changelog** tag should contain the version string to avoid the rpmlint tool from generating alarms. 
-- If multiple lines are involved, such as %changelog or %description, start from the next line of the instruction and end with a blank line. -- Some unnecessary lines \(such as BuildRequires and Requires\) can be commented out with a number sign \(\#\) at the beginning of the lines. -- The default values of **%prep**, **%build**, **%install**, and **%file** are retained. - -#### Building an RPM Package - -Run the following command in the directory where the .spec file is located to build the source code, binary files, and software packages that contain debugging information: - -``` -$ rpmbuild -ba hello.spec -``` - -Run the following command to view the execution result: - -``` -$ tree ~/rpmbuild/*RPMS - -/home/testUser/rpmbuild/RPMS -└── aarch64 - ├── hello-2.10-1.aarch64.rpm - ├── hello-debuginfo-2.10-1.aarch64.rpm - └── hello-debugsource-2.10-1.aarch64.rpm -/home/testUser/rpmbuild/SRPMS -└── hello-2.10-1.src.rpm -``` - -## Building an RPM Package Using the OBS - -This section describes how to build RPM software packages using the OBS on the web page or with OSC. There are two methods: - -- Modifying an existing software package: Modify the source code of an existing software package and build the modified source code into an RPM software package. -- Adding a software package: A new software source file is developed from scratch, and the newly developed source file is used to build an RPM software package. - - - -### OBS Overview - -OBS is a general compilation framework based on the openSUSE distribution. It is used to build source code packages into RPM software packages or Linux images. OBS uses the automatic distributed compilation mode and supports the compilation of images and installation packages of multiple Linux OS distributions \(such as openEuler, SUSE, and Debian\) on multiple architecture platforms \(such as x86 and ARM64\). - -OBS consists of the backend and frontend. The backend implements all core functions. 
The frontend provides web applications and APIs for interaction with the backend. In addition, OBS provides an API command line client OSC, which is developed in an independent repository. - -OBS uses the project organization software package. Basic permission control, related repository, and build targets \(OS and architecture\) can be defined in the project. A project can contain multiple subprojects. Each subproject can be configured independently to complete a task. - -### Building an RPM Software Package Online - -This section describes how to build an RPM software package online on OBS. - - - -#### Building an Existing Software Package - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- If you use OBS for the first time, register an individual account on the OBS web page. ->- With this method, you must copy the modified code and commit it to the code directory before performing the following operations. The code directory is specified in the **\_service** file. - -To modify the source code of the existing software and build the modified source file into an RPM software package on the OBS web client, perform the following steps: - -1. Log in to OBS at [https://build.openeuler.org/](https://build.openeuler.org/). -2. Click **All Projects**. The **All Projects** page is displayed. -3. Click the project to be modified. The project details page is displayed. For example, click **openEuler:Mainline**. -4. On the project details page, search for the software package to be modified and click the software package name. The software package details page is displayed. -5. Click **Branch package**. In the displayed dialog box, click **Accept**, as shown in [Figure 1](#fig77646143214). - - **Figure 1** **Branch Confirmation** page - ![](./figures/branch-confirmation-page.png "branch-confirmation-page") - -6. Click the **\_service** file to go to the editing page, modify the file content, and click **Save**. 
An example of the **\_service** file content is as follows. _userCodeURL_ and _userCommitID_ indicate the user code path and commit version number or branch, respectively. - - ``` - - - git - userCodeURL - userCommitID - - - bz2 - *.tar - - - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Click **Save** to save the **\_service** file. OBS downloads the source code from the specified URL to the software directory of the corresponding OBS project based on the **\_service** file description and replaces the original file. For example, the **kernel** directory of the **openEuler:Mainline** project in the preceding example. - -7. After the files are copied and replaced, OBS automatically starts to build the RPM software package. Wait until the build is complete and view the build status in the status bar on the right. - - **succeeded**: The build is successful. You can click **succeeded** to view the build logs, as shown in [Figure 2](#fig10319114217337). - - **Figure 2** **Succeeded** page - ![](./figures/succeeded-page.png "succeeded-page") - - - **failed**: The build failed. Click **failed** to view error logs, locate the fault, and rebuild again. - - **unresolvable**: The build is not performed. The possible cause is that the dependency is missing. - - **disabled**: The build is manually closed or is queuing for build. - - **excluded**: The build is prohibited. The possible cause is that the .spec file is missing or the compilation of the target architecture is prohibited in the .spec file. - - -#### Adding a Software Package - -To add a new software package on the OBS web page, perform the following steps: - -1. Log in to the OBS console. -2. Select a project based on the dependency of the new software package. That is, click **All Projects** and select the corresponding project, for example, **openEuler:Mainline**. -3. Click a software package in the project. The software package details page is displayed. -4. Click **Branch package**. 
On the confirmation page that is displayed, click **Accept**. -5. Click **Delete package** to delete the software package in the new subproject, as shown in [Figure 3](#fig18306181103615). - - **Figure 3** Deleting a software package from a subproject - ![](./figures/deleting-a-software-package-from-a-subproject.png "deleting-a-software-package-from-a-subproject") - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The purpose of creating a project by using existing software is to inherit the dependency such as the environment. Therefore, you need to delete these files. - -6. Click **Create Package**. On the page that is displayed, enter the software package name, title, and description, and click **Create** to create a software package, as shown in [Figure 4](#fig6762111693811) and [Figure 5](#fig18351153518389). - - **Figure 4** **Create Package** page - ![](./figures/create-package-page.png "create-package-page") - - **Figure 5** Creating a software package - ![](./figures/creating-a-software-package.png "creating-a-software-package") - -7. Click **Add file** to upload the .spec file and the file to be compiled \(specified in the .spec file\), as shown in [Figure 6](#fig1475845284011). - - **Figure 6** **Add file** page - ![](./figures/add-file-page.png "add-file-page") - -8. After the file is uploaded, OBS automatically starts to build the RPM software package. Wait until the build is complete and view the build status in the status bar on the right. - - **succeeded**: The build is successful. You can click **succeeded** to view the build logs. - - **failed**: The build failed. Click **failed** to view error logs, locate the fault, and rebuild again. - - **unresolvable**: The build is not performed. The possible cause is that the dependency is missing. - - **disabled**: The build is manually closed or is queuing for build. - - **excluded**: The build is prohibited. 
The possible cause is that the .spec file is missing or the compilation of the target architecture is prohibited in the .spec file. - - -#### Obtaining the Software Package - -After the RPM software package is built, perform the following operations to obtain the RPM software package on the web page: - -1. Log in to the OBS console. -2. Click **All Projects** and find the project corresponding to the required software package, for example, **openEuler:Mainline**. -3. Click the name of the required software package in the project. The software package details page is displayed, for example, the **kernel** page in the preceding example. - -1. Click the **Repositories** tab. On the software repository management page that is displayed, click **Enable** in **Publish Flag** to enable the RPM software package download function \(the status changes from ![](./figures/en-us_image_0229243704.png) to ![](./figures/en-us_image_0229243702.png)\), as shown in [Figure 7](#fig17480830144217). - - **Figure 7** **Repositories** page - ![](./figures/repositories-page.png "repositories-page") - -2. Click the project name in the **Repository** column. On the RPM software package download page that is displayed, click **Download** on the right of the RPM software package to download the RPM software package, as shown in [Figure 8](#fig12152145615438). - - **Figure 8** RPM software package download page - ![](./figures/rpm-software-package-download-page.png "rpm-software-package-download-page") - - -### Building a Software Package Using OSC - -This section describes how to use the OBS command line tool OSC to create a project and build an RPM software package. - - - -#### Installing and Configuring the OSC - -##### Prerequisites -You have obtained the **root** permission, and have configured a repo source for openEuler. - -##### Procedure -1. Install the OSC command line tool and its dependency as the **root** user. 
- - ``` - # dnf install osc build - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The compilation of RPM software packages depends on build. - -2. Configure the OSC. - 1. Run the following command to open the **\~/.oscrc** file: - - ``` - $ vi ~/.oscrc - ``` - - 2. Add the **user** and **pass** fields to **\~/.oscrc**. The values of _userName_ and _passWord_ are the account and password registered on the OBS website \([https://build.openeuler.org/](https://build.openeuler.org/)\). - - ``` - [general] - apiurl = https://build.openeuler.org/ - [https://build.openeuler.org/] - user=userName - pass=passWord - ``` - - -#### Building an Existing Software Package - -**Creating a Project** - -1. You can copy an existing project to create a subproject of your own. For example, to copy the **zlib** software package in the **openEuler:Mainline** project to the new branch, run the following command: - - ``` - $ osc branch openEuler:Mainline zlib - ``` - - If the following information is displayed, a new branch project **home:testUser:branches:openEuler:Mainline** is created for user **testUser**. - - ``` - A working copy of the branched package can be checked out with: - osc co home:testUser:branches:openEuler:Mainline/zlib - ``` - -2. Download the configuration file \(for example, **\_service**\) of the software package to be modified to the local directory. In the preceding command, _testUser_ indicates the account name configured in the **\~/.oscrc** configuration file. Change it based on the actual requirements. - - ``` - $ osc co home:testUser:branches:openEuler:Mainline/zlib - ``` - - Information similar to the following is displayed: - - ``` - A home:testUser:branches:openEuler:Mainline - A home:testUser:branches:openEuler:Mainline/zlib - A home:testUser:branches:openEuler:Mainline/zlib/_service - ``` - -3. Go to the local subproject directory and synchronize the remote code of the software package to the local host. 
- - ``` - $ cd home:testUser:branches:openEuler:Mainline/zlib - $ osc up -S - ``` - - Information similar to the following is displayed: - - ``` - A _service:tar_scm_kernel_repo:0001-Neon-Optimized-hash-chain-rebase.patch - A _service:tar_scm_kernel_repo:0002-Porting-optimized-longest_match.patch - A _service:tar_scm_kernel_repo:0003-arm64-specific-build-patch.patch - A _service:tar_scm_kernel_repo:zlib-1.2.11-optimized-s390.patch - A _service:tar_scm_kernel_repo:zlib-1.2.11.tar.xz - A _service:tar_scm_kernel_repo:zlib-1.2.5-minizip-fixuncrypt.patch - A _service:tar_scm_kernel_repo:zlib.spec - ``` - - -**Building an RPM Package** - -1. Rename the source file and add the renamed source file to the temporary storage of OBS. - - ``` - $ rm -f _service;for file in `ls | grep -v .osc`;do new_file=${file##*:};mv $file $new_file;done - $ osc addremove * - ``` - -2. Modify the source code and .spec file, and run the following command to update the file. - - ``` - $ osc up - ``` - -3. Synchronize all modifications of the corresponding software package to the OBS server. The following is an example command. The information after the **-m** parameter indicates the submission record. - - ``` - $ osc ci -m "commit log" - ``` - -4. Run the following command to obtain the repository name and architecture of the current project: - - ``` - $ osc repos home:testUser:branches:openEuler:Mainline - ``` - -5. After the modification is committed, OBS automatically compiles the software package. You can run the following command to view the compilation logs of the corresponding repository. In the command, *standard\_aarch64* and _aarch64_ indicate the repository name and architecture obtained in the command output. - - ``` - $ osc buildlog standard_aarch64 aarch64 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >You can also open the created project on the web client to view the build logs. 
- - -#### Adding a Software Package - -To use the OSC tool of OBS to add a new software package, perform the following steps: - -**Creating a Project** - -1. Create a project based on the dependency of the new software package and a proper project. For example, to create a project based on **zlib** of the **openEuler:Mainline** project, run the following command \(**zlib** is any software package in the project\): - - ``` - $ osc branch openEuler:Mainline zlib - ``` - -2. Delete unnecessary software packages added during project creation. For example, to delete the **zlib** software package, run the following command: - - ``` - $ cd home:testUser:branches:openEuler:Mainline - $ osc rm zlib - $ osc commit -m "commit log" - ``` - -3. Create a software package in your own project. For example, to add the **my-first-obs-package** software package, run the following command: - - ``` - $ mkdir my-first-obs-package - $ cd my-first-obs-package - ``` - - -**Building an RPM Package** - -1. Add the prepared source file and .spec file to the software package directory. -2. Modify the source code and .spec file, and upload all files of the corresponding software package to the OBS server. The following is a command example. The information after the **-m** parameter is the submission record. - - ``` - $ cd home:testUser:branches:openEuler:Mainline - $ osc add my-first-obs-package - $ osc ci -m "commit log" - ``` - -3. Run the following command to obtain the repository name and architecture of the current project: - - ``` - $ osc repos home:testUser:branches:openEuler:Mainline - ``` - -4. After the modification is committed, OBS automatically compiles the software package. You can run the following command to view the compilation logs of the corresponding repository. In the command, _standard\_aarch64_ and _aarch64_ indicate the repository name and architecture obtained in the command output. 
- - ``` - $ cd home:testUser:branches:openEuler:Mainline/my-first-obs-package - $ osc buildlog standard_aarch64 aarch64 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >You can also open the created project on the web client to view the build logs. - - -#### Obtaining the Software Package - -After the RPM software package is built, run the following command to obtain the RPM software package using the OSC: - -``` -$ osc getbinaries home:testUser:branches:openEuler:Mainline my-first-obs-package standard_aarch64 aarch64 -``` - -The parameters in the command are described as follows. You can modify the parameters according to the actual situation. - -- _home:testUser:branches:openEuler:Mainline_: name of the project to which the software package belongs. -- _my-first-obs-package_: name of the software package. -- _standard\_aarch64_: repository name. -- _aarch64_: repository architecture name. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->You can also obtain the software package built using OSC from the web page. For details, see [Obtaining the Software Package](#obtaining-the-software-package). 
- diff --git a/docs/en/docs/ApplicationDev/figures/add-file-page.png b/docs/en/docs/ApplicationDev/figures/add-file-page.png deleted file mode 100644 index 83f0bfaeeb9227bcbb863a93ab8d3535e2b2bc1d..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/add-file-page.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/branch-confirmation-page.png b/docs/en/docs/ApplicationDev/figures/branch-confirmation-page.png deleted file mode 100644 index e66cbcd22217b74785381b85128ea61895194882..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/branch-confirmation-page.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/create-package-page.png b/docs/en/docs/ApplicationDev/figures/create-package-page.png deleted file mode 100644 index 36ea525856d428b6f88a338202e7cb59b2204fc0..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/create-package-page.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/creating-a-software-package.png b/docs/en/docs/ApplicationDev/figures/creating-a-software-package.png deleted file mode 100644 index f983809e8288f3c2ba7e951b60a3ca3a0f18775a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/creating-a-software-package.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/deleting-a-software-package-from-a-subproject.png b/docs/en/docs/ApplicationDev/figures/deleting-a-software-package-from-a-subproject.png deleted file mode 100644 index a365cd1f46bfb8bec094b79477c0168861a5193b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/deleting-a-software-package-from-a-subproject.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243671.png b/docs/en/docs/ApplicationDev/figures/en-us_image_0229243671.png deleted file mode 100644 index 
ad5ed3f7beeb01e6a48707c4806606b41d687e22..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243671.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243702.png b/docs/en/docs/ApplicationDev/figures/en-us_image_0229243702.png deleted file mode 100644 index 96096879d161f04750a332e5c749a834c49d3173..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243702.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243704.png b/docs/en/docs/ApplicationDev/figures/en-us_image_0229243704.png deleted file mode 100644 index 267bc9508f3a065b5b40c367e745f0d8c3ddb5fa..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243704.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243712.png b/docs/en/docs/ApplicationDev/figures/en-us_image_0229243712.png deleted file mode 100644 index 62ef0decdf6f1e591059904001d712a54f727e68..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/en-us_image_0229243712.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/repositories-page.png b/docs/en/docs/ApplicationDev/figures/repositories-page.png deleted file mode 100644 index b7c04eedf9dd32cf4a9d024a05f5c8b294c76934..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/repositories-page.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/figures/rpm-software-package-download-page.png b/docs/en/docs/ApplicationDev/figures/rpm-software-package-download-page.png deleted file mode 100644 index 9f32d6c16d344df6951fc4e6aa027d02dfb9ccb5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/rpm-software-package-download-page.png and /dev/null differ diff --git 
a/docs/en/docs/ApplicationDev/figures/succeeded-page.png b/docs/en/docs/ApplicationDev/figures/succeeded-page.png deleted file mode 100644 index 3f10cd1db8bdc9be1ab8b660ef93e8a481c2d6b8..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/figures/succeeded-page.png and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/preparation.md b/docs/en/docs/ApplicationDev/preparation.md deleted file mode 100644 index 2e11b6c0d5fa98ed0d1ac5826dbf9048ecb907a0..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/preparation.md +++ /dev/null @@ -1,493 +0,0 @@ -# Preparation - - - -- [Preparation](#preparation) - - [Configuring the Development Environment](#configuring-the-development-environment) - - [OS Requirements](#os-requirements) - - [Configuring a Repo Source](#configuring-a-repo-source) - - [Configuring a Repo Source by Directly Obtaining the Repo Source File](#configuring-a-repo-source-by-directly-obtaining-the-repo-source-file) - - [Configuring a Repo Source by Mounting an ISO File](#configuring-a-repo-source-by-mounting-an-iso-file) - - [Installing the Software Package](#installing-the-software-package) - - [Installing the JDK Software Package](#installing-the-jdk-software-package) - - [Installing the rpm-build Software Package](#installing-the-rpm-build-software-package) - - [Using the IDE for Java Development](#using-the-ide-for-java-development) - - [Overview](#overview) - - [Logging In to the Server Using MobaXterm](#logging-in-to-the-server-using-mobaxterm) - - [Setting the JDK Environment](#setting-the-jdk-environment) - - [Downloading and Installing the GTK Library](#downloading-and-installing-the-gtk-library) - - [Setting X11 Forwarding](#setting-x11-forwarding) - - [Downloading and Running IntelliJ IDEA](#downloading-and-running-intellij-idea) - - -## Configuring the Development Environment - -- If physical machines (PMs) are used, the minimum hardware requirements of the development environment 
are described in [Table 1](#table154419352610). - - **Table 1** Minimum hardware specifications - - - - - - - - - - - - - - - - - - - - - - - - - -

Component

-

Minimum Hardware Specification

-

Description

-

Architecture

-
  • AArch64
  • x86_64
-
  • 64-bit Arm architecture
  • 64-bit Intel x86 architecture
-

CPU

-
  • Huawei Kunpeng 920 series
  • Intel ® Xeon® processor
-

-

-

Memory

-

≥ 4 GB (8 GB or higher recommended for better user experience)

-

-

-

Hard disk

-

≥ 120 GB (for better user experience)

-

IDE, SATA, SAS interfaces are supported.

-
- -- If virtual machines (VMs) are used, the minimum virtualization space required for the development environment is described in [Table 2](#table780410493819). - - **Table 2** Minimum virtualization space - - - - - - - - - - - - - - - - - - - - - - - - - -

Component

-

Minimum Virtualization Space

-

Description

-

Architecture

-
  • AArch64
  • x86_64
-

-

-

CPU

-

Two CPUs

-

-

-

Memory

-

≥ 4 GB (8 GB or higher recommended for better user experience)

-

-

-

Hard disk

-

≥ 32 GB (120 GB or higher recommended for better user experience)

-

-

-
- - -### OS Requirements - -The openEuler OS is required. - -For details about how to install the openEuler OS, see the \[*openEuler 21.03 Installation Guide*\](./../Installation/Installation.html ). On the **SOFTWARE SELECTION** page, select **Development Tools** in the **Add-Ons for Selected Environment** area. - -## Configuring a Repo Source - -Configure an online yum source by directly obtaining the online openEuler repo source. Alternatively, configure a local yum source by mounting an ISO file and creating a local openEuler repo source. - -### Configuring a Repo Source by Directly Obtaining the Repo Source File - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -> openEuler provides multiple repo sources for users online. For details about the repo sources, see [System Installation](./../Releasenotes/installing-the-os.html). This section uses the **openEuler\_aarch64.repo** file as an example to describe how to configure the OS repo source as the yum source. - -1. Go to the yum source directory and check the .repo configuration file in the directory. - - ``` - $ cd /etc/yum.repos.d - $ ls - openEuler.repo - ``` - -2. Edit the **openEuler\_aarch64.repo** file as the **root** user. Configure the online openEuler repo source as the yum source. - - ``` - # vi openEuler.repo - ``` - - Edit the **openEuler\_aarch64.repo** file as follows: - - ``` - [osrepo] - name=osrepo - baseurl=http://repo.openeuler.org/openEuler-21.03/OS/aarch64/ - enabled=1 - gpgcheck=1 - gpgkey=http://repo.openeuler.org/openEuler-21.03/OS/aarch64/RPM-GPG-KEY-openEuler - ``` - - **** - - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - The repoid in \[*repoid* \] indicates the ID of the software repository. Repoids in all .repo configuration files must be unique. In the example, repoid is set to **base**. - > - **name** indicates the string that the software repository describes. - > - **baseurl** indicates the address of the software repository. 
- > - **enabled** indicates whether to enable the software source repository. The value can be **1** or **0**. The default value is **1**, indicating that the software source repository is enabled. - > - **gpgcheck** indicates whether to enable the GNU privacy guard (GPG) to check the validity and security of sources of RPM packages. **1** indicates GPG check is enabled. **0** indicates the GPG check is disabled. If this option is not specified, the GPG check is enabled by default. - > - **gpgkey** is the public key used to verify the signature. - - -### Configuring a Repo Source by Mounting an ISO File - -> ![](./public_sys-resources/icon-note.gif) ********NOTE:******** -> openEuler provides multiple ISO release packages. For details about each ISO release package, see [System Installation](./../Releasenotes/installing-the-os.html). This section uses the **openEuler-21.03-aarch64-dvd.iso** file and **openEuler-21.03-aarch64-dvd.iso.sha256sum** verification file as examples. Modify them based on the actual requirements. - -1. Download the ISO release package. - - - Download an ISO image using a cross-platform file transfer tool. - - 1. Log in to the openEuler community at [https://openeuler.org](https://openeuler.org). - - 2. Click **Download**. - - 3. Click the link provided after **Download ISO**. The download list is displayed. - - 4. Select the version to be downloaded, for example, openEuler 21.03. Then, click **openEuler-21.03**. The download list is displayed. - - 5. Click **ISO**. The ISO download list is displayed. - - - **aarch64**: ISO image file of the AArch64 architecture - - **x86\_64**: ISO image file of the x86\_64 architecture - - **source**: ISO image file of the openEuler source code - - 6. Click **aarch64**. - - 7. Click **openEuler-21.03-aarch64-dvd.iso** to download the openEuler release package to the local host. - - 8. Click **openEuler-21.03-aarch64-dvd.iso.sha256sum** to download the openEuler verification file to the local host. - - 9. 
Log in to the openEuler OS and create a directory for storing the release package and verification file, for example, ~/iso\*\*. - - ``` - $ mkdir ~/iso - ``` - - 10. Use a cross-platform file transfer tool (such as WinSCP) to upload the local openEuler release package and verification file to the target openEuler OS. - - - Run the **wget** command to download the ISO image. - - 1. Log in to the openEuler community at [https://openeuler.org](https://openeuler.org). - - 2. Click **Download**. - - 3. Click the link provided after **Download ISO**. The download list is displayed. - - 4. Select the version to be downloaded, for example, openEuler 21.03. Then, click **openEuler-21.03**. The download list is displayed. - - 5. Click **ISO**. The ISO download list is displayed. - - - **aarch64**: ISO image file of the AArch64 architecture - - **x86\_64**: ISO image file of the x86\_64 architecture - - **source**: ISO image file of the openEuler source code - - 6. Click **aarch64**. - - 7. Right-click **openEuler-21.03-aarch64-dvd.iso** and choose **Copy URL** from the shortcut menu to copy the address of the openEuler release package. - - 8. Right-click **openEuler-21.03-aarch64-dvd.iso.sha256sum** and choose **Copy URL** from the shortcut menu to copy the address of the openEuler verification file. - - 9. Log in to the openEuler OS, create a directory (for example, **~/iso**) for storing the release package and verification file, and switch to the directory. - - ``` - $ mkdir ~/iso - $ cd ~/iso - ``` - - 10. Run the **wget** command to remotely download the release package and verification file. In the command, **ipaddriso** and **ipaddrisosum** are the addresses copied in [1.g](#li62369349505) and [1.h](#li9236203405015). - - ``` - $ wget ipaddriso - $ wget ipaddrisosum - ``` - -2. Release Package Integrity Check - - 1. Obtain the verification value in the verification file. - - ``` - $ cat openEuler-21.03-aarch64-dvd.iso.sha256sum - ``` - - 2. 
Calculate the SHA256 verification value of the openEuler release package. - - ``` - $ sha256sum openEuler-21.03-aarch64-dvd.iso - ``` - - After the command is run, the verification value is displayed. - - 3. Check whether the values calculated in step 1 and step 2 are consistent. - - If the verification values are consistent, the .iso file is not damaged. If they are inconsistent, the file is damaged and you need to obtain the file again. - -3. Mount the ISO file and configure it as a repo source. - - Run the **mount** command as the **root** user to mount the image file. - - The following is an example: - - ``` - # mount /home/iso/openEuler-21.03-aarch64-dvd.iso /mnt/ - ``` - - The mounted **mnt** directory is as follows: - - ``` - . - │── boot.catalog - │── docs - │── EFI - │── images - │── Packages - │── repodata - │── TRANS.TBL - └── RPM-GPG-KEY-openEuler - ``` - - In the preceding command, **Packages** indicates the directory where the RPM package is stored, **repodata** indicates the directory where the repo source metadata is stored, and **RPM-GPG-KEY-openEuler** indicates the public key for signing openEuler. - -4. Go to the yum source directory and check the .repo configuration file in the directory. - - ``` - $ cd /etc/yum.repos.d - $ ls - openEuler.repo - ``` - -6. Edit the **openEuler\_aarch64.repo** file as the **root** user. Configure the local openEuler repo source created in step [3](#li6236932222) as the yum source. - - ``` - # vi openEuler.repo - ``` - - Edit the **openEuler\_aarch64.repo** file as follows: - - ``` - [localosrepo] - name=localosrepo - baseurl=file:///mnt - enabled=1 - gpgcheck=1 - gpgkey=file:///mnt/RPM-GPG-KEY-openEuler - ``` - -## Installing the Software Package - -Install the software required for development. The software required varies in different development environments. However, the installation methods are the same. This section describes how to install common software packages (such as JDK and rpm-build). 
Some development software, such as GCC and GNU make, is provided by the openEuler OS by default. - -### Installing the JDK Software Package - -1. Run the **dnf list installed \| grep jdk** command to check whether the JDK software is installed. - - ``` - $ dnf list installed | grep jdk - ``` - - Check the command output. If the command output contains "jdk", the JDK has been installed. If no such information is displayed, the software is not installed. - -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Query the JDK software package that can be installed. - - ``` - $ dnf search jdk | grep jdk - ``` - - View the command output and install the **java-x.x.x-openjdk-devel.aarch64** software package. **x.x.x** indicates the version number. - -5. Install the JDK software package as the **root** user. The following uses the **java-1.8.0-openjdk-devel** software package as an example. - - ``` - # dnf install java-1.8.0-openjdk-devel.aarch64 - ``` - -6. Query information about the JDK software. - - ``` - $ java -version - ``` - - Check the command output. If the command output contains "openjdk version "1.8.0\_232"", the JDK has been correctly installed. In the command output, **1.8.0\_232** indicates the JDK version. - -### Installing the rpm-build Software Package - -1. Run the **dnf list installed \| grep rpm-build** command to check whether the rpm-build software is installed. - - ``` - $ dnf list installed | grep rpm-build - ``` - - Check the command output. If the command output contains "rpm-build", the software has been installed. If no such information is displayed, the software is not installed. - -2. Clear the cache. - - ``` - $ dnf clean all - ``` - -3. Create a cache. - - ``` - $ dnf makecache - ``` - -4. Install the rpm-build package as the **root** user. - - ``` - # dnf install rpm-build - ``` - -5. Query the rpm-build software version. 
- - ``` - $ rpmbuild --version - ``` - -## Using the IDE for Java Development - -For small-sized Java applications, you can directly use JDK to compile them to run Java applications. However, for medium- and large-sized Java applications, this method cannot meet the development requirements. You can perform the following steps to install and use the IDE to facilitate Java development on the openEuler OS. - -### Overview - -IntelliJ IDEA is a popular Java IDE. You can download the community edition of IntelliJ IDEA for free. Currently, openEuler supports Java development in the IntelliJ IDEA integrated development environment (IDE), improving the work efficiency of developers. - -### Logging In to the Server Using MobaXterm - -MobaXterm is an excellent SSH client. It has an X Server and can easily solve remote GUI display problems. - -You need to download, install, and start MobaXterm in advance, and then log in to your server in SSH mode to perform the following operations: - -### Setting the JDK Environment - -Before setting JAVA\_HOME, you need to find the installation path of the JDK. You are supported to have installed the JDK. If you have not installed the JDK, install it by referring to Preparation > Installing the Software Package > Installing the JDK Software Package. - -Run the following command to view the Java path: - -``` -$ which java -/usr/bin/java -``` - -Run the following command to check the directory to which the soft link points: - -``` -$ ls -la /usr/bin/java -lrwxrwxrwx. 1 root root 22 Mar 6 20:28 /usr/bin/java -> /etc/alternatives/java -$ ls -la /etc/alternatives/java -lrwxrwxrwx. 1 root root 83 Mar 6 20:28 /etc/alternatives/java -> /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-1.h2.aarch64/jre/bin/java -``` - -The actual path is **/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-1.h2.aarch64**. 
Run the following command to set **JAVA\_HOME** and **PATH**: - -``` -$ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-1.h2.aarch64 -$ export PATH=$JAVA_HOME/bin:$PATH -``` - -### Downloading and Installing the GTK Library - -Run the following command: - -``` -$ dnf list installed | grep gtk -``` - -If **gtk2** or **gtk3** is displayed, the GTK library has been installed. In this case, skip this step. Otherwise, run the following command as the **root** user to automatically download and install the GTK library: - -``` -# dnf -y install gtk2 libXtst libXrender xauth -``` - -### Setting X11 Forwarding - -Switch to the SSHD configuration directory. - -``` -$ cd ~/.ssh -``` - -If the directory does not exist, run the following command to create the directory and then switch to the directory: - -``` -$ mkdir ~/.ssh -``` - -Edit the configuration file in the **.ssh** directory and save the file. - -1. Run the **vim** command to open the configuration file. - - ``` - $ vim config - ``` - -2. Add the following content to the end of the file and save the file: - - ``` - Host * - ForwardAgent yes - ForwardX11 yes - ``` - -### Downloading and Running IntelliJ IDEA - -After the preceding environment configuration is complete, you can download and run the IntelliJ IDEA. The latest version of IntelliJ IDEA is incompatible with openEuler in some functions. You are advised to click [here](https://www.jetbrains.com/idea/download/other.html) and download the Linux package of the 2018 version. Move the downloaded package to the directory where you want to install the software and decompress the package. - -``` -$ tar xf ideaIC-2018.3.tar.gz -``` - -Decompress the package, switch to the IntelliJ IDEA directory, and run the IntelliJ IDEA. 
- -``` -$ cd ./idea-IC-183.4284.148 -$ bin/idea.sh & -``` \ No newline at end of file diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-caution.gif b/docs/en/docs/ApplicationDev/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-danger.gif b/docs/en/docs/ApplicationDev/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-note.gif b/docs/en/docs/ApplicationDev/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-notice.gif b/docs/en/docs/ApplicationDev/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-tip.gif b/docs/en/docs/ApplicationDev/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/public_sys-resources/icon-warning.gif 
b/docs/en/docs/ApplicationDev/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/ApplicationDev/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/ApplicationDev/using-gcc-for-compilation.md b/docs/en/docs/ApplicationDev/using-gcc-for-compilation.md deleted file mode 100644 index bd66aaf38282ff97ec281d35e0af4dfb933f0e17..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/using-gcc-for-compilation.md +++ /dev/null @@ -1,620 +0,0 @@ -# Using GCC for Compilation - -This chapter describes the basic knowledge of GCC compilation and provides examples for demonstration. For more information about GCC, run the **man gcc** command. - - -- [Using GCC for Compilation](#using-gcc-for-compilation) - - [Overview](#overview) - - [Basics](#basics) - - [File Type](#file-type) - - [Compilation Process](#compilation-process) - - [Compilation Options](#compilation-options) - - [Multi-file Compilation](#multi-file-compilation) - - [Libraries](#libraries) - - [Dynamic Link Library](#dynamic-link-library) - - [Static Link Library](#static-link-library) - - [Examples](#examples) - - [Example for Using GCC to Compile C Programs](#example-for-using-gcc-to-compile-c-programs) - - [Example for Creating and Using a DLL Using GCC](#example-for-creating-and-using-a-dll-using-gcc) - - [Example for Creating and Using an SLL Using GCC](#example-for-creating-and-using-an-sll-using-gcc) - - - - -## Overview - -The GNU Compiler Collection \(GCC\) is a powerful and high-performance multi-platform compiler developed by GNU. The GCC compiler can compile and link source programs, assemblers, and target programs of C and C++ into executable files. By default, the GCC software package is installed in the openEuler OS. 
- -## Basics - - - -### File Type - -For any given input file, the file type determines which compilation to perform. [Table 1](#table634145764320) describes the common GCC file types. - -**Table 1** Common GCC file types - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Extension (Suffix)

-

Description

-

.c

-

C source code file.

-

.C, .cc, or .cxx

-

C++ source code file.

-

.m

-

Objective-C source code file.

-

.s

-

Assembly language source code file.

-

.i

-

Preprocessed C source code file.

-

.ii

-

Preprocessed C++ source code file.

-

.S

-

Pre-processed assembly language source code file.

-

.h

-

Header file contained in the program.

-

.o

-

Target file after compilation.

-

.so

-

Dynamic link library, which is a special target file.

-

.a

-

Static link library.

-

.out

-

Executable files, which do not have a fixed suffix. The system distinguishes executable files from unexecutable files based on file attributes. If the name of an executable file is not given, GCC generates a file named a.out.

-
- -### Compilation Process - -Using GCC to generate executable files from source code files requires preprocessing, compilation, assembly, and linking. - -1. Preprocessing: Preprocess the source program \(such as a **.c** file\) to generate an **.i** file. -2. Compilation: Compile the preprocessed **.i** file into an assembly language to generate an **.s** file. -3. Assemble: Assemble the assembly language file to generate the target file **.o**. -4. Linking: Link the **.o** files of each module to generate an executable program file. - -The **.i**, **.s**, and **.o** files are intermediate or temporary files. If the GCC is used to compile programs in C language at a time, these files will be deleted. - -### Compilation Options - -GCC compilation command format: **gcc** \[_options_\] \[_filenames_\] - -In the preceding information: - -_options_ : compilation options. - -_filenames_ : file name. - -GCC is a powerful compiler. It has many _options_, but most of them are not commonly used. [Table 2](#table1342946175212) describes the common _options_. - -**Table 2** Common GCC compilation options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

options Value

-

Description

-

Example

-

-c

-

Compiles and assembles specified source files to generate target files without linking them. It is usually used to compile subprogram files.

-

# Use the -c option to compile the source files test1.c and test2.c.

-

gcc -c test1.c test2.c

-

-S

-

Compiles the specified source file to generate an assembly language file with the .s suffix but without assembling it.

-

# Use the compiler to preprocess circle.c, translate it into assembly language, and store the result in circle.s.

-

gcc -S circle.c

-

-E

-

Preprocesses specified source files without compiling them.

-

By default, the output of the preprocessor is imported to a standard output stream, such as a display. You can use the -o option to import it to an output file.

-

# Export the preprocessing result to the circle.i file.

-

gcc -E circle.c -o circle.i

-

-o file

-

Generates a specified output file when an executable file is generated. The name must be different from that of the source file. If this option is not given, GCC generates the preset executable file a.out.

-

# Use the source file as the input file and the executable file as the output file. That is, compile the entire program.

-

gcc main.c func.c -o app.out

-

-g

-

Contains standard debugging information in executable programs.

-

-

-

-L library_path

-

Adds the library_path to the library file search path list.

-

-

-

-llibrary

-

Searches for the specified function library during linking.

-

When GCC is used to compile and link programs, GCC links libc.a or libc.so by default. However, other libraries (such as non-standard libraries and third-party libraries) need to be manually added.

-

# Use the -l option to link the math library.

-

gcc main.c -o main.out -lm

-
NOTE:

The file name of the math library is libm.a. The prefix lib and suffix .a are standard, and m is the basic name. GCC automatically adds these prefixes and suffixes to the basic name following the -l option. In this example, the basic name is m.

-
-

-I head_path

-

Adds the head_path to the search path list of the header file.

-

-

-

-static

-

Performs static compilation and links static libraries; does not link dynamic libraries.

-

-

-

-shared

-

Default option, which can be omitted.

-
  • A dynamic library file can be generated.
  • During dynamic compilation, the dynamic library is preferentially linked. The static library with the same name is linked only when there is no dynamic library.
-

-

-

-fPIC (or -fpic)

-

Generates location-independent target code that uses a relative address. Generally, the -shared option is used to generate a dynamic library file from the PIC target file.

-

-

-
- -### Multi-file Compilation - -There are two methods provided for compiling multiple source files. - -- Multiple source files are compiled at the same time. All files need to be recompiled during compilation. - - Example: Compile **test1.c** and **test2.c** and link them to the executable file **test**. - - ``` - $ gcc test1.c test2.c -o test - ``` - -- Compile each source file, and then link the target files generated after compilation. During compilation, only modified files need to be recompiled. - - For example, compile **test1.c** and **test2.c**, and link the target files **test1.o** and **test2.o** to the executable file **test**. - - ``` - $ gcc -c test1.c - $ gcc -c test2.c - $ gcc -o test1.o test2.o -o test - ``` - - -## Libraries - -A library is mature and reusable code that has been written for use. Each program depends on many basic underlying libraries. - -The library file name is prefixed with lib and suffixed with .so \(dynamic library\) or .a \(static library\). The middle part is the user-defined library file name, for example, libfoo.so or libfoo.a. Because all library files comply with the same specifications, the **lib** prefix can be omitted when the **-l** option specifies the name of the linked library file. That is, when GCC processes **-lfoo**, the library file **libfoo.so** or **libfoo.a** is automatically linked. When creating a library, you must specify the full file name **libfoo.so** or **libfoo.a**. - -Libraries are classified into static libraries and dynamic libraries based on the linking time. The static library links and packs the target file .o generated by assembly and the referenced library into an executable file in the linking phase. The dynamic library is not linked to the target code when the program is compiled, but is loaded when the program is run. The differences are as follows: - -- The resource usage is different. 
- - The static library is a part of the generated executable file, while the dynamic library is a separate file. Therefore, the sizes and occupied disk space of the executable files of the static library and dynamic library are different, which leads to different resource usage. - -- The scalability and compatibility are different. - - If the implementation of a function in the static library changes, the executable file must be recompiled. For the executable file generated by dynamic linking, only the dynamic library needs to be updated, and the executable file does not need to be recompiled. - -- The dependency is different. - - The executable file of the static library can run without depending on any other contents, while the executable file of the dynamic library must depend on the dynamic library. Therefore, the static library is convenient to migrate. - -- The loading speeds are different. - - Static libraries are linked together with executable files, while dynamic libraries are linked only when they are loaded or run. Therefore, for the same program, static linking is faster than dynamic linking. - - - -### Dynamic Link Library - -You can use the **-shared** and **-fPIC** options to create a dynamic link library \(DLL\) with the source file, assembly file, or target file. The **-fPIC** option is used in the compilation phase. This option is used when the target file is generated, so as to generate location-independent code. - -Example 1: Generate a DLL from the source file. - -``` -$ gcc -fPIC -shared test.c -o libtest.so -``` - -Example 2: Generate a DLL from the target file. - -``` -$ gcc -fPIC -c test.c -o test.o -$ gcc -shared test.o -o libtest.so -``` - -To link a DLL to an executable file, you need to list the name of the DLL in the command line. - -Example: Compile **main.c** and **libtest.so** into **app.out**. When **app.out** is running, the link library **libtest.so** is dynamically loaded. 
- -``` -$ gcc main.c libtest.so -o app.out -``` - -In this mode, the **libtest.so** file in the current directory is used. - -If you choose to search for a DLL, to ensure that the DLL can be linked when the program is running, you must implement by using one of the following methods: - -- Save the DLL to a standard directory, for example, **/usr/lib**. -- Add the DLL path **libaryDIR** to the environment variable **LD\_LIBRARY\_PATH**. - - $ export LD\_LIBRARY\_PATH=libraryDIR:$LD\_LIBRARY\_PATH - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >**LD\_LIBRARY\_PATH** is an environment variable of the DLL. If the DLL is not in the default directories \(**/lib** and **/usr/lib**\), you need to specify the environment variable **LD\_LIBRARY\_PATH**. - -- Add the DLL path **libaryDIR** to **/etc/ld.so.conf** and run **ldconfig**, or use the DLL path **libaryDIR** as a parameter to run **ldconfig**. - -``` -$ gcc main.c -L libraryDIR -ltest -o app.out -$ export LD_LIBRARY_PATH=libraryDIR:$LD_LIBRARY_PATH -``` - -### Static Link Library - -To create a static link library \(SLL\), you need to compile the source file to the target file, and then run the **ar** command to compress the target file into an SLL. - -Example: Compile and compress source files **test1.c**, **test2.c**, and **test3.c** into an SLL. - -``` -$ gcc -c test1.c test2.c test3.c -$ ar rcs libtest.a test1.o test2.o test3.o -``` - -The **ar** command is a backup compression command. You can compress multiple files into a backup file \(also called an archive file\) or extract member files from the backup file. The most common use of **ar** is to compress the target files into an SLL. - -The format of the **ar** command to compress the target files into an SLL is as follows: - -ar rcs _Sllfilename_ _Targetfilelist_ - -- _Sllfilename_ : Name of the static library file. -- _Targetfilelist_ : Target file list. -- **r**: replaces the existing target file in the library or adds a new target file. 
-- **c**: creates a library regardless of whether the library exists. -- **s**: creates the index of the target file. The speed can be improved when a large library is created. - -Example: Create a main.c file to use the SLL. - -``` -$ gcc main.c -L libraryDIR -ltest -o test.out -``` - -In the preceding command, **libraryDIR** indicates the path of the libtest.a library. - -## Examples - - - -### Example for Using GCC to Compile C Programs - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. The command is as follows: - - ``` - $ cd ~/code - ``` - -2. Compile the Hello World program and save it as **helloworld.c**. The following uses the Hello World program as an example. The command is as follows: - - ``` - $ vi helloworld.c - ``` - - Code example: - - ``` - #include - int main() - { - printf("Hello World!\n"); - return 0; - } - ``` - -3. Run the following command to compile the code in the code directory: - - ``` - $ gcc helloworld.c -o helloworld - ``` - - If no error is reported, the execution is successful. - -4. After the compilation is complete, the helloworld file is generated. Check the compilation result. The following is an example: - - ``` - $ ./helloworld - Hello World! - ``` - - -### Example for Creating and Using a DLL Using GCC - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. Create the **src**, **lib**, and **include** subdirectories in the directory to store the source file, DLL file, and header file, respectively. - - ``` - $ cd ~/code - $ mkdir src lib include - ``` - -2. Run the **cd** command to go to the **~/code/src** directory and create two functions **add.c** and **sub.c** to implement addition and subtraction, respectively. 
- - ``` - $ cd ~/code/src - $ vi add.c - $ vi sub.c - ``` - - The following is an example of the **add.c** code: - - ``` - #include "math.h" - int add(int a, int b) - { - return a+b; - } - ``` - - The following is an example of the **sub.c** code: - - ``` - #include "math.h" - int sub(int a, int b) - { - return a-b; - } - ``` - -3. Compile the source files add.c and sub.c into the DLL libmath.so, and store the DLL in the **~/code/lib** directory. - - ``` - $ gcc -fPIC -shared add.c sub.c -o ~/code/lib/libmath.so - ``` - -4. Go to the **~/code/include** directory, create a header file **math.h**, and declare the header file of the function. - - ``` - $ cd ~/code/include - $ vi math.h - ``` - - The following is an example of the **math.h** code: - - ``` - #ifndef __MATH_H_ - #define __MATH_H_ - int add(int a, int b); - int sub(int a, int b); - #endif - ``` - -5. Run the **cd** command to go to the **~/code/src** directory and create a **main.c** function that invokes add\(\) and sub\(\). - - ``` - $ cd ~/code/src - $ vi main.c - ``` - - The following is an example of the **math.c** code: - - ``` - #include - #include "math.h" - int main() - { - int a, b; - printf("Please input a and b:\n"); - scanf("%d %d", &a, &b); - printf("The add: %d\n", add(a,b)); - printf("The sub: %d\n", sub(a,b)); - return 0; - } - ``` - -6. Compile **main.c** and **libmath.so** into **math.out**. - - ``` - $ gcc main.c -I ~/code/include -L ~/code/lib -lmath -o math.out - ``` - -7. Add the path of the DLL to the environment variable. - - ``` - $ export LD_LIBRARY_PATH=~/code/lib:$LD_LIBRARY_PATH - ``` - -8. Run the following command to execute **math.out**: - - ``` - $ ./math.out - ``` - - The command output is as follows: - - ``` - Please input a and b: - 9 2 - The add: 11 - The sub: 7 - ``` - - -### Example for Creating and Using an SLL Using GCC - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. 
Create the **src**, **lib**, and **include** subdirectories in the directory to store the source file, SLL file, and header file respectively. - - ``` - $ cd ~/code - $ mkdir src lib include - ``` - -2. Run the **cd** command to go to the **~/code/src** directory and create two functions **add.c** and **sub.c** to implement addition and subtraction, respectively. - - ``` - $ cd ~/code/src - $ vi add.c - $ vi sub.c - ``` - - The following is an example of the **add.c** code: - - ``` - #include "math.h" - int add(int a, int b) - { - return a+b; - } - ``` - - The following is an example of the **sub.c** code: - - ``` - #include "math.h" - int sub(int a, int b) - { - return a-b; - } - ``` - -3. Compile the source files **add.c** and **sub.c** into the target files **add.o** and **sub.o**. - - ``` - $ gcc -c add.c sub.c - ``` - -4. Run the **ar** command to compress the **add.o** and **sub.o** target files into the SLL **libmath.a** and save the SLL to the **~/code/lib** directory. - - ``` - $ ar rcs ~/code/lib/libmath.a add.o sub.o - ``` - -5. Go to the **~/code/include** directory, create a header file **math.h**, and declare the header file of the function. - - ``` - $ cd ~/code/include - $ vi math.h - ``` - - The following is an example of the **math.h** code: - - ``` - #ifndef __MATH_H_ - #define __MATH_H_ - int add(int a, int b); - int sub(int a, int b); - #endif - ``` - -6. Run the **cd** command to go to the **~/code/src** directory and create a **main.c** function that invokes add\(\) and sub\(\). - - ``` - $ cd ~/code/src - $ vi main.c - ``` - - The following is an example of the **math.c** code: - - ``` - #include - #include "math.h" - int main() - { - int a, b; - printf("Please input a and b:\n"); - scanf("%d %d", &a, &b); - printf("The add: %d\n", add(a,b)); - printf("The sub: %d\n", sub(a,b)); - return 0; - } - ``` - -7. Compile **main.c** and **libmath.a** into **math.out**. 
- - ``` - $ gcc main.c -I ~/code/include -L ~/code/lib -lmath -o math.out - ``` - -8. Run the following command to execute **math.out**: - - ``` - $ ./math.out - ``` - - The command output is as follows: - - ``` - Please input a and b: - 9 2 - The add: 11 - The sub: 7 - ``` diff --git a/docs/en/docs/ApplicationDev/using-jdk-for-compilation.md b/docs/en/docs/ApplicationDev/using-jdk-for-compilation.md deleted file mode 100644 index 4946b917d12b6bf483f3507c1a327deaf3a04de0..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/using-jdk-for-compilation.md +++ /dev/null @@ -1,531 +0,0 @@ -# Using JDK for Compilation - - -- [Using JDK for Compilation](#using-jdk-for-compilation) - - [Overview](#overview) - - [Basics](#basics) - - [File Type and Tool](#file-type-and-tool) - - [Java Program Generation Process](#java-program-generation-process) - - [Common JDK Options](#common-jdk-options) - - [Class Library](#class-library) - - [Package Declaration](#package-declaration) - - [Package Reference](#package-reference) - - [Examples](#examples) - - [Compiling a Java Program Without a Package](#compiling-a-java-program-without-a-package) - - [Compiling a Java Program with a Package](#compiling-a-java-program-with-a-package) - - - -## Overview - -A Java Development Kit \(JDK\) is a software package required for Java development. It contains the Java Runtime Environment \(JRE\) and compilation and commissioning tools. On the basis of OpenJDK, openEuler optimizes GC, enhances concurrency stability, and enhances security, improving the performance and stability of Java applications on ARM. - -## Basics - - - -### File Type and Tool - -For any given input file, the file type determines which tool to use for processing. The common file types and tools are described in [Table 1](#table634145764320) and [Table 2](#table103504146433). - -**Table 1** Common JDK file types - - - - - - - - - - - - - - - - -

Extension (Suffix)

-

Description

-

.java

-

Java source code file.

-

.class

-

Java bytecode file, which is intermediate code irrelevant to any specific machine or OS environment. It is a binary file, which is the target code file generated after the Java source file is compiled by the Java compiler.

-

.jar

-

JAR package of Java files.

-
- -**Table 2** Common JDK tools - - - - - - - - - - - - - - - - -

Name

-

Description

-

java

-

Java running tool, which is used to run .class bytecode files or .jar files.

-

javac

-

Compiles Java source code files into .class bytecode files.

-

jar

-

Creates and manages JAR files.

-
- -### Java Program Generation Process - -To generate a program from Java source code files and run the program using Java, compilation and run are required. - -1. Compilation: Use the Java compiler \(javac\) to compile Java source code files \(.java files\) into .class bytecode files. -2. Run: Execute the bytecode files on the Java virtual machine \(JVM\). - -### Common JDK Options - -#### Javac Compilation Options - -The command format for javac compilation is as follows: **javac** \[_options_\] \[_sourcefiles_\] \[_classes_\] \[@_argfiles_\] - -In the preceding information: - -_options_: command options. - -_sourcefiles_: one or more source files to be compiled. - -_classes_: one or more classes to be processed as comments. - -@_argfiles_: one or more files that list options and source files. The **-J** option is not allowed in these files. - -Javac is a Java compiler. It has many _options_, but most of them are not commonly used. [Table 3](#table1342946175212) describes the common options values. - -**Table 3** Common javac options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

options Value

-

Description

-

Example

-

-d path

-

Path for storing the generated class files.

-

By default, the class files generated after compilation are in the same path as the source file. You can use the -d option to export the class files to the specified path.

-

# Use the -d option to export all class files to the bin directory.

-

javac /src/*.java -d /bin

-

-s path

-

Path for storing the generated source files.

-

-

-

-cp path or -classpath path

-

Searches for the class files required for compilation and specifies the location of the class files.

-

# In the Demo, the getLine() method in the GetStringDemo class needs to be invoked. The .class file compiled by the GetStringDemo class is stored in the bin directory.

-

javac -cp bin Demo.java -d bin

-

-verbose

-

Outputs information about the operations being performed by the compiler, such as loaded class information and compiled source file information.

-

# Display information about the operations that are being performed by the compiler.

-

javac -verbose -cp bin Demo.java

-

-source sourceversion

-

Specifies the version of the source code to be accepted, for example, 1.8, providing source compatibility with the specified Java release.

-

-

-

-sourcepath path

-

Searches for source files (Java files) required for compilation and specifies the location of the source files to be searched for, for example, JAR, ZIP, or other directories that contain Java files.

-

-

-

-target targetversion

-

Generates class files of a specific JVM version. The value can be 1.1, 1.2, 1.3, 1.4, 1.5 (or 5), 1.6 (or 6), 1.7 (or 7), or 1.8 (or 8). The default value of targetversion is related to sourceversion of the -source option. The options of sourceversion are as follows:

-
  • 1.2, corresponding to target version 1.4
  • 1.3, corresponding to target version 1.4
  • 1.5, 1.6, 1.7, and unspecified, corresponding to target version 1.8
  • For other values, the values of targetversion and sourceversion are the same.
-

-

-
- -#### Java Running Options - -The Java running format is as follows: - -Running class file: **java** \[_options_\] _classesname_ \[args\] - -Running Java file: **java** \[_options_\] -jar _filename_ \[args\] - -In the preceding information: - -_options_: command options, which are separated by spaces. - -_classname_: name of the running .class file. - -_filename_: name of the running .jar file. - -args: parameters transferred to the main\(\) function. The parameters are separated by spaces. - -Java is a tool for running Java applications. It has many _options_, but most of them are not commonly used. [Table 4](#table371918587238) describes the common options. - -**Table 4** Common Java running options - - - - - - - - - - - - - - - - -

options Value

-

Description

-

Example

-

-cp path or -classpath path

-

Specifies the location of the file to be run and the class path to be used, including the .jar, .zip, and class file directories.

-

If there are multiple paths, separate them with colons (:).

-

-

-

-verbose

-

Outputs information about the operations being performed by the compiler, such as loaded class information and compiled source file information.

-

# Display information about the operations that are being performed by the compiler.

-

java -verbose -cp bin Demo

-
- -#### JAR Options - -The JAR command format is as follows: **jar** \{c | t | x | u\}\[vfm0M\] \[_jarfile_\] \[_manifest_\] \[-C _dir_\] _file_... - -[Table 5](#table3691718114817) describes the parameters in the **jar** command. - -**Table 5** JAR parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Example

-

c

-

Creates a JAR package.

-

# Compress the hello.class files in the current directory into Hello.jar. The compression process is not displayed. If the Hello.jar files do not exist, create them. Otherwise, clear the directory.

-

jar cf Hello.jar hello.class

-

t

-

Lists the contents of a JAR package.

-

# List the files contained in Hello.jar.

-

jar tf Hello.jar

-

x

-

Decompresses a JAR package.

-

# Decompress Hello.jar to the current directory. No information is displayed.

-

jar xf Hello.jar

-

u

-

Updates the existing JAR package, for example, add files to the JAR package.

-

-

-

v

-

Generates a detailed report and prints it to the standard output.

-

# Compress the hello.class files in the current directory into Hello.jar and display the compression process. If the Hello.jar files do not exist, create them. Otherwise, clear the directory.

-

jar cvf Hello.jar hello.class

-

f

-

Specifies the name of a JAR package. This parameter is mandatory.

-

-

-

m

-

Specifies the manifest file to be contained.

-

-

-

0

-

Stores files without compression. The generated JAR package is larger than a compressed one but is created and loaded faster.

-

-

-

M

-

Does not generate a manifest file for the entries.

-

# Compress the hello.class files in the current directory into Hello.jar and display the compression process. If the Hello.jar files do not exist, create them. Otherwise, clear the directory. However, the manifest file is not generated when Hello.jar is created.

-

jar cvfM Hello.jar hello.class

-

jarfile

-

JAR package, which is an auxiliary parameter of the f parameter.

-

-

-

manifest

-

Manifest file in .mf format, which is an auxiliary parameter of the m parameter.

-

-

-

-C dir

-

Temporarily changes to the specified dir while processing the files that follow. This parameter can be used only with the c and u parameters.

-

-

-

file

-

Specifies the file or path list. All files in the file or path (including those in the recursive path) are compressed into the JAR package or the JAR package is decompressed to the path.

-

# Compress all class files in the current directory into Hello.jar and display the compression process. If the Hello.jar files do not exist, create them. Otherwise, clear the directory.

-

jar cvf Hello.jar *.class

-
- -## Class Library - -The Java class library is implemented as a package, which is a collection of classes and interfaces. The Java compiler generates a bytecode file for each class, and the file name is the same as the class name. Therefore, conflicts may occur between classes with the same name. In the Java language, a group of classes and interfaces are encapsulated in a package. Class namespaces can be effectively managed by package. Classes in different packages do not conflict even if they have the same name. This solves the problem of conflicts between classes with the same name and facilitates the management of a large number of classes and interfaces. It also ensures the security of classes and interfaces. - -In addition to many packages provided by Java, developers can customize packages by collecting compiled classes and interfaces into a package for future use. - -Before using a custom package, you need to declare the package. - -### Package Declaration - -The declaration format of a package is package pkg1\[.pkg2\[.pkg3...\]\]. - -To declare a package, you must create a directory. The subdirectory name must be the same as the package name. Then declare the package at the beginning of the class file that needs to be placed in the package, indicating that all classes of the file belong to the package. The dot \(.\) in the package declaration indicates the directory hierarchy. If the source program file does not contain the package statement, the package is specified as an anonymous package. An anonymous package does not have a path. Generally, Java still stores the classes in the source file in the current working directory \(that is, the directory where the Java source files are stored\). - -The package declaration statement must be added to the beginning of the source program file and cannot be preceded by comments or spaces. 
If you use the same package declaration statement in different source program files, you can include the classes in different source program files in the same package. - -### Package Reference - -In Java, there are two methods to use the common classes in the package provided by Java or the classes in the custom package. - -- Add the package name before the name of the class to be referenced. - - For example, name.A obj=new name.A \(\) - - **name** indicates the package name, **A** indicates the class name, and **obj** indicates the object. This string indicates that class **A** in the **name** package is used to define an object **obj** in the program. - - Example: Create a test object of the Test class in the example package. - - ``` - example.Test test = new example.Test(); - ``` - -- Use **import** at the beginning of the file to import the classes in the package. - - The format of the **import** statement is import pkg1\[.pkg2\[.pkg3...\]\].\(classname | \*\). - - **pkg1\[.pkg2\[.pkg3...\]\]** indicates the package level, and **classname** indicates the class to be imported. If you want to import multiple classes from a package, you can use the wildcard \(\*\) instead. - - Example: Import the **Test** class in the **example** package. - - ``` - import example.Test; - ``` - - Example: Import the entire **example** package. - - ``` - import example.*; - ``` - - -## Examples - - - -### Compiling a Java Program Without a Package - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. The command is as follows: - - ``` - $ cd ~/code - ``` - -2. Compile the Hello World program and save it as **HelloWorld.java**. The following uses the Hello World program as an example. The command is as follows: - - ``` - $ vi HelloWorld.java - ``` - - Code example: - - ``` - public class HelloWorld { - public static void main(String[] args) { - System.out.println("Hello World"); - } - } - ``` - -3. 
Run the following command to compile the code in the code directory: - - ``` - $ javac HelloWorld.java - ``` - - If no error is reported, the execution is successful. - -4. After the compilation is complete, the HelloWorld.class file is generated. You can run the **java** command to view the result. The following is an example: - - ``` - $ java HelloWorld - Hello World - ``` - - -### Compiling a Java Program with a Package - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. Create the **~/code/Test/my/example**, **~/code/Hello/world/developers**, and **~/code/Hi/openos/openeuler** subdirectories in the directory to store source files. - - ``` - $ cd ~/code - $ mkdir -p Test/my/example - $ mkdir -p Hello/world/developers - $ mkdir -p Hi/openos/openeuler - ``` - -2. Run the **cd** command to go to the **~/code/Test/my/example** directory and create **Test.java**. - - ``` - $ cd ~/code/Test/my/example - $ vi Test.java - ``` - - The following is an example of the Test.java code: - - ``` - package my.example; - import world.developers.Hello; - import openos.openeuler.Hi; - public class Test { - public static void main(String[] args) { - Hello me = new Hello(); - me.hello(); - Hi you = new Hi(); - you.hi(); - } - } - ``` - -3. Run the **cd** command to go to the **~/code/Hello/world/developers** directory and create **Hello.java**. - - ``` - $ cd ~/code/Hello/world/developers - $ vi Hello.java - ``` - - The following is an example of the Hello.java code: - - ``` - package world.developers; - public class Hello { - public void hello(){ - System.out.println("Hello, openEuler."); - } - } - ``` - -4. Run the **cd** command to go to the **~/code/Hi/openos/openeuler** directory and create **Hi.java**. 
- - ``` - $ cd ~/code/Hi/openos/openeuler - $ vi Hi.java - ``` - - The following is an example of the Hi.java code: - - ``` - package openos.openeuler; - public class Hi { - public void hi(){ - System.out.println("Hi, the global developers."); - } - } - ``` - -5. Run the **cd** command to go to the **~/code** directory and use javac to compile the source file. - - ``` - $ cd ~/code - $ javac -classpath Hello:Hi Test/my/example/Test.java - ``` - - After the command is executed, the **Test.class**, **Hello.class**, and **Hi.class** files are generated in the **~/code/Test/my/example**, **~/code/Hello/world/developers**, and **~/code/Hi/openos/openeuler** directories. - -6. Run the **cd** command to go to the **~/code** directory and run the **Test** program using Java. - - ``` - $ cd ~/code - $ java -classpath Test:Hello:Hi my/example/Test - ``` - - The command output is as follows: - - ``` - Hello, openEuler. - Hi, the global developers. - ``` diff --git a/docs/en/docs/ApplicationDev/using-make-for-compilation.md b/docs/en/docs/ApplicationDev/using-make-for-compilation.md deleted file mode 100644 index 199f76d06425021e9716e490ebc1e16a572d2661..0000000000000000000000000000000000000000 --- a/docs/en/docs/ApplicationDev/using-make-for-compilation.md +++ /dev/null @@ -1,377 +0,0 @@ -# Using Make for Compilation - -This chapter describes the basic knowledge of make compilation and provides examples for demonstration. For more information about Make, run the **man make** command. 
- - -- [Using Make for Compilation](#using-make-for-compilation) - - [Overview](#overview) - - [Basics](#basics) - - [File Type](#file-type) - - [make Work Process](#make-work-process) - - [make Options](#make-options) - - [Makefiles](#makefiles) - - [Makefile Structure](#makefile-structure) - - [Makefile Contents](#makefile-contents) - - [Examples](#examples) - - [Example of Using Makefile to Implement Compilation](#example-of-using-makefile-to-implement-compilation) - - - -## Overview - -The GNU make utility \(usually abbreviated as make\) is a tool for controlling the generation of executable files from source files. make automatically identifies which parts of the complex program have changed and need to be recompiled. Make uses a configuration file called makefiles to control how the program is built. - -## Basics - - -### File Type - -[Table 1](#table634145764320) describes the file types that may be used in the makefiles file. - -**Table 1** File types - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Extension (Suffix)

-

Description

-

.c

-

C source code file.

-

.C, .cc, or .cxx

-

C++ source code file.

-

.m

-

Objective-C source code file.

-

.s

-

Assembly language source code file.

-

.i

-

Preprocessed C source code file.

-

.ii

-

Preprocessed C++ source code file.

-

.S

-

Pre-processed assembly language source code file.

-

.h

-

Header file contained in the program.

-

.o

-

Target file after compilation.

-

.so

-

Dynamic link library, which is a special target file.

-

.a

-

Static link library.

-

.out

-

Executable files, which do not have a fixed suffix. The system distinguishes executable files from unexecutable files based on file attributes. If the name of an executable file is not given, GCC generates a file named a.out.

-
- -### make Work Process - -The process of deploying make to generate an executable file from the source code file is described as follows: - -1. The make command reads the Makefiles, including the files named GNUmakefile, makefile, and Makefile in the current directory, the included makefile, and the rule files specified by the **-f**, **\-\-file**, and **\-\-makefile** options. -2. Initialize variables. -3. Derive implicit rules, analyze dependencies, and create a dependency chain. -4. Determine which targets need to be regenerated based on the dependency chain. -5. Run a command to generate the final file. - -### make Options - -make command format: **make** \[_option_\]... \[_target_\]... - -In the preceding command: - -_option_ : parameter option. - -_target_ : target specified in Makefile. - -[Table 2](#table261872312343) describes the common make options. - -**Table 2** Common make options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

options Value

-

Description

-

-C dir, \-\-directory=dir

-

Specifies dir as the working directory after the make command starts to run.

-

When there are multiple -C options, each directory is interpreted relative to the previous one. For example, -C / -C etc is equivalent to -C /etc.

-

-d

-

Displays all debugging information during execution of the make command. You can use the -d option to display all the information during the construction of the dependency chain and the reconstruction of the target.

-

-e, \-\-environment-overrides

-

Overwrites the variable definition with the same name in Makefile with the environment variable definition.

-

-f file, \-\-file=file,

-

\-\-makefile=file

-

Specifies the file as the Makefile for the make command.

-

-h, \-\-help

-

Displays help information.

-

-i, \-\-ignore-errors

-

Ignores the errors occurred during the execution.

-

-k, \-\-keep-going

-

When an error occurs during command execution, the make command is not terminated immediately. Instead, make continues and builds as many of the remaining targets as possible before stopping.

-

-n, \-\-just-print, \-\-dry-run

-

Simulates the execution of commands (including the commands starting with @) in the actual execution sequence. This command is used only to display the execution process and has no actual execution effect.

-

-o file, \-\-old-file=file, \-\-assume-old=file

-

The specified file does not need to be rebuilt even if its dependency has expired, and no target of this dependency file is rebuilt.

-

-p, \-\-print-data-base

-

Before the commands are executed, prints all data of the Makefile read by make as well as the make version information. To print only the data without executing any commands, run the make -qp command. To view the preset (built-in) rules and variables of make, run the make -p -f /dev/null command.

-

-r, \-\-no-builtin-rules

-

Ignores the use of embedded implicit rules and the implicit suffix list of all suffix rules.

-

-R, \-\-no-builtin-variables

-

Ignores embedded hidden variables.

-

-s, \-\-silent, \-\-quiet

-

Cancels the printing during the command execution.

-

-S, \-\-no-keep-going, \-\-stop

-

Cancels the -k option. In the recursive make process, the sub-make inherits the upper-layer command line option through the MAKEFLAGS variable. You can use the -S option in the sub-make to cancel the -k option transferred by the upper-layer command, or cancel the -k option in the system environment variable MAKEFLAGS.

-

-t, \-\-touch

-

Updates the timestamp of all target files to the current system time. Prevents make from rebuilding all outdated target files.

-

-v, \-\-version

-

Displays the make version.

-
- -## Makefiles - -Make is a tool that uses makefiles for compilation, linking, installation, and cleanup, so as to generate executable files and other related files from source code files. Therefore, makefiles describe the compilation and linking rules of the entire project, including which files need to be compiled, which files do not need to be compiled, which files need to be compiled first, which files need to be compiled later, and which files need to be rebuilt. The makefiles automate project compilation. You do not need to manually enter a large number of source files and parameters each time. - -This chapter describes the structure and main contents of makefiles. For more information about makefiles, run the **info make** command. - -### Makefile Structure - -The makefile file structure is as follows: - -_targets_:_prereguisites_ - -_command_ - -or - -_targets_:_prerequisites_;_command_ - -_command_ - -In the preceding information: - -- _targets_ : targets, which can be target files, executable files, or tags. -- _prerequisites_ : dependency files, which are the files or targets required for generating the _targets_. There can be multiple or none of them. -- _command_ : command \(any shell command\) to be executed by make. Multiple commands are allowed, and each command occupies a line. -- Use colons \(:\) to separate the target files from the dependency files. Press **Tab** at the beginning of each command line. - -The makefile file structure indicates the output target, the object on which the output target depends, and the command to be executed for generating the target. - -### Makefile Contents - -A makefile file consists of the following contents: - -- Explicit rule - - Specify the dependency, such as the file to be generated, dependency file, and generated command. - -- Implicit rule - - Specify the rule that is automatically derived by make. The make command supports the automatic derivation function. 
- -- Variable definition -- File indicator - - The file indicator consists of three parts: - - - Inclusion of other makefiles, for example, include xx.md - - Selective execution, for example, \#ifdef - - Definition of multiple command lines, for example, define...endef. \(define ... endef\) - -- Comment - - The comment starts with a number sign \(\#\). - - -## Examples - - - -### Example of Using Makefile to Implement Compilation - -1. Run the **cd** command to go to the code directory. The **~/code** directory is used as an example. - - ``` - $ cd ~/code - ``` - -2. Create a header file **hello.h** and two functions **hello.c** and **main.c**. - - ``` - $ vi hello.h - $ vi hello.c - $ vi main.c - ``` - - The following is an example of the **hello.h** code: - - ``` - #pragma once - #include - void hello(); - ``` - - The following is an example of the **hello.c** code: - - ``` - #include "hello.h" - void hello() - { - int i=1; - while(i<5) - { - printf("The %dth say hello.\n", i); - i++; - } - } - - ``` - - The following is an example of the **main.c** code: - - ``` - #include "hello.h" - #include - int main() - { - hello(); - return 0; - } - ``` - -3. Create the makefile. - - ``` - $ vi Makefile - ``` - - The following provides an example of the makefile content: - - ``` - main:main.o hello.o - gcc -o main main.o hello.o - main.o:main.c - gcc -c main.c - hello.o:hello.c - gcc -c hello.c - clean: - rm -f hello.o main.o main - ``` - -4. Run the **make** command. - - ``` - $ make - ``` - - After the command is executed, the commands executed in makefile are printed. If you do not need to print the information, add the **-s** option to the **make** command. - - gcc -c main.c - - gcc -c hello.c - - gcc -o main main.o hello.o - -5. Execute the ./main target. - - ``` - $ ./main - ``` - - After the command is executed, the following information is displayed: - - The 1th say hello. - - The 2th say hello. - - The 3th say hello. - - The 4th say hello. 
diff --git a/docs/en/docs/Container/appendix-1.md b/docs/en/docs/Container/appendix-1.md deleted file mode 100644 index 88feb8756850a0852848f376b892f699394645f9..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/appendix-1.md +++ /dev/null @@ -1,94 +0,0 @@ -## Appendix - -- [Appendix](#appendix-1) - - [Command Line Interface List](#command-line-interface-list) - - -## Command Line Interface List - -This section lists commands in system containers, which are different from those in common containers. For details about other commands, refer to sections related to the iSulad container engine or run the **isula _XXX_ --help** command. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameters

-

Value Description

-

isula create/run

-

--external-rootfs

-
  • Variable of the string type.
  • Absolute path on the host.
  • Specifies the rootfs of a VM when running a system container.
-

--system-container

-
  • Boolean variable.
  • Specifies whether a container is a system container. In a system container scenario, this function must be enabled.
-

--add-host

-
  • Variable of the string type.
  • Specifies the hosts configuration for a container. The format is hostname:ip. Multiple values can be set.
-

--dns, --dns-option, --dns-search

-
  • Variable of the string type.
  • Specifies the DNS configuration for a container. Multiple values can be set.
-

--ns-change-opt

-
  • Variable of the string type.
  • Container namespace kernel parameter. The value can only be net or ipc. If multiple values are set, separate them with commas (,), for example, --ns-change-opt=net,ipc.
-

--oom-kill-disable

-
  • Boolean variable.
  • Indicates whether to enable the oom-kill-disable function.
-

--shm-size

-
  • Variable of the string type.
  • Sets the size of /dev/shm. The default value is 64 MB. The unit can be byte (B), kilobyte (KB), megabyte (MB), gigabyte (GB), terabyte (TB), or petabyte (PB).
-

--sysctl

-
  • Variable of the string type.
  • Specifies container kernel parameters. The format is key=value. Multiple values can be set. The sysctl whitelist is as follows:
-

kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced, kernel.pid_max, net., and fs.mqueue

-
NOTE:

The kernel.pid_max kernel parameter in a container must be able to be namespaced. Otherwise, an error is reported.

-

Parameter restrictions (including the parameter types and value ranges) of the sysctl whitelist in a container must be the same as those of kernel parameters in the physical machine.

-
-

--env-target-file

-
  • Variable of the string type.
  • Specifies the env persistent file path. (The path must be an absolute path and the file must be in the rootfs directory.) The file size cannot exceed 10 MB. If the value of --env conflicts with that of env in the file, the value of --env takes effect.
  • The root directory of the absolute path is the rootfs root directory. That is, to set the file path to /etc/environment in the container, you need to specify env-target-file=/etc/environment only.
-

--cgroup-parent

-
  • Variable of the string type.
  • Specifies the cgroup parent directory of a container. The cgroup root directory is /sys/fs/cgroup/controller.
-

--host-channel

-
  • Variable of the string type.
  • Specifies the memory space shared between the host and a container (tmpfs). The format is as follows:
-

host path:container path:rw/ro:size limit

-

--files-limit

-
  • Variable of the string type.
  • Specifies the maximum number of file handles in a container. The value must be an integer.
-

--user-remap

-
  • Variable of the string type.
  • The parameter format is uid:gid:offset.
-
- diff --git a/docs/en/docs/Container/appendix-2.md b/docs/en/docs/Container/appendix-2.md deleted file mode 100644 index f5342bf482626a31862cf0e1d902874e8bc6f336..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/appendix-2.md +++ /dev/null @@ -1,488 +0,0 @@ -# Appendix - -- [Appendix](#appendix-2) - - [configuration.toml](#configuration-toml) - - [APIs](#apis) - -## configuration.toml - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The value of each field in the **configuration.toml** file is subject to the **configuration.toml** file in the **kata-containers-<**_version_**\>.rpm package**. You cannot set any field in the configuration file. - -``` -[hypervisor.qemu] -path: specifies the execution path of the virtualization QEMU. -kernel: specifies the execution path of the guest kernel. -initrd: specifies the guest initrd execution path. -image: specifies the execution path of the guest image (not applicable). -machine_type: specifies the type of the analog chip. The value is virt for the ARM architecture and pc for the x86 architecture. -kernel_params: specifies the running parameters of the guest kernel. -firmware: specifies the firmware path. If this parameter is left blank, the default firmware is used. -machine_accelerators: specifies an accelerator. -default_vcpus: specifies the default number of vCPUs for each SB/VM. -default_maxvcpus: specifies the default maximum number of vCPUs for each SB/VM. -default_root_ports: specifies the default number of root ports for each SB/VM. -default_bridges: specifies the default number of bridges for each SB/VM. -default_memory: specifies the default memory size of each SB/VM. The default value is 1024 MiB. -memory_slots: specifies the number of memory slots for each SB/VM. The default value is 10. -memory_offset: specifies the memory offset. The default value is 0. -disable_block_device_use: disables the block device from being used by the rootfs of the container. 
-shared_fs: specifies the type of the shared file system. The default value is virtio-9p. -virtio_fs_daemon: specifies the path of the vhost-user-fs daemon process. -virtio_fs_cache_size: specifies the default size of the DAX cache. -virtio_fs_cache: specifies the cache mode. -block_device_driver: specifies the driver of a block device. -block_device_cache_set: specifies whether to set cache-related options for a block device. The default value is false. -block_device_cache_direct: specifies whether to enable O_DIRECT. The default value is false. -block_device_cache_noflush: specifies whether to ignore device update requests. The default value is false. -enable_iothreads: enables iothreads. -enable_mem_prealloc: enables VM RAM pre-allocation. The default value is false. -enable_hugepages: enables huge pages. The default value is false. -enable_swap: enables the swap function. The default value is false. -enable_debug: enables QEMU debugging. The default value is false. -disable_nesting_checks: disables nested check. -msize_9p = 8192: specifies the number of bytes transmitted in each 9p packet. -use_vsock: uses vsocks to directly communicate with the agent (the prerequisite is that vsocks is supported). The default value is false. -hotplug_vfio_on_root_bus: enables the hot swap of the VFIO device on the root bus. The default value is false. -disable_vhost_net: disables vhost_net. The default value is false. -entropy_source: specifies the default entropy source. -guest_hook_path: specifies the binary path of the guest hook. - -[factory] -enable_template: enables the VM template. The default value is false. -template_path: specifies the template path. -vm_cache_number: specifies the number of VM caches. The default value is 0. -vm_cache_endpoint: specifies the address of the Unix socket used by the VMCache. The default value is /var/run/kata-containers/cache.sock. - -[proxy.kata] -path: specifies the kata-proxy running path. -enable_debug: enables proxy debugging. 
The default value is false. - -[shim.kata] -path: specifies the running path of kata-shim. -enable_debug: enables shim debugging. The default value is false. -enable_tracing: enables shim opentracing. - -[agent.kata] -enable_debug: enables the agent debugging function. The default value is false. -enable_tracing: enables the agent tracing function. -trace_mode: specifies the trace mode. -trace_type: specifies the trace type. -enable_blk_mount: enables guest mounting of the block device. - -[netmon] -enable_netmon: enables network monitoring. The default value is false. -path: specifies the kata-netmon running path. -enable_debug: enables netmon debugging. The default value is false. - -[runtime] -enable_debug: enables runtime debugging. The default value is false. -enable_cpu_memory_hotplug: enables CPU and memory hot swap. The default value is false. -internetworking_model: specifies the network interconnection mode between VMs and containers. -disable_guest_seccomp: disables the seccemp security mechanism in the guest application. The default value is true. -enable_tracing: enables runtime opentracing. The default value is false. -disable_new_netns: disables network namespace creation for the shim and hypervisor processes. The default value is false. -experimental: enables the experimental feature, which does not support user-defined configurations. -``` - -## APIs - -**Table 1** Commands related to the kata-runtime network - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Subcommand

-

File Example

-

Field

-

Description

-

Remarks

-

kata-network

-
NOTE:
  • The kata-network command must be used in groups. Network devices that are not added using kata-runtime kata-network cannot be deleted or listed using kata-runtime kata-network. The reverse is also true.
  • kata-runtime kata-network imports configuration parameters through a file or stdin.
-
-

add-iface

-
NOTE:
  • An interface can be added to only one container.
  • The execution result is subject to the returned value (non-zero return value).
-
-

  

-

{

-

"device":"tap1",

-

"name":"eth1",

-

"IPAddresses":[{"address":"172.17.1.10","mask":"24"}],

-

"mtu":1300,

-

"hwAddr":"02:42:20:6f:a2:80"

-

"vhostUserSocket":"/usr/local/var/run/openvswitch/vhost-user1"

-

}

-

  

-

device

-

Sets the name of the NIC on a host.

-

Mandatory. The value can contain a maximum of 15 characters, including letters, digits, underscores (\_), hyphens (-), and periods (.). It must start with a letter. The device name must be unique on the same host.

-

name

-

Sets the name of the NIC in the container.

-

Mandatory. The value can contain a maximum of 15 characters, including letters, digits, underscores (\_), hyphens (-), and periods (.). It must start with a letter. Ensure that the name is unique in the same sandbox.

-

IPAddresses

-

Sets the IP address of an NIC.

-

Optional.

-

Currently, one IP address can be configured for each NIC. If no IP address is configured for the NIC, no IP address will be configured in the container, either.

-

mtu

-

Sets the MTU of an NIC.

-

Mandatory.

-

The value ranges from 46 to 9600.

-

hwAddr

-

Sets the MAC address of an NIC.

-

Mandatory.

-

vhostUserSocket

-

Sets the DPDK polling socket path.

-

Optional.

-

The path contains a maximum of 128 bytes. The naming rule can contain digits, letters, and hyphens (-). The path name must start with a letter.

-

del-iface

-

{

-

"name":"eth1"

-

}

-

None

-

Deletes an NIC from a container.

-
NOTE:

When deleting a NIC, you can only delete it based on the name field in the NIC container. Kata does not identify other fields.

-
-

list-ifaces

-

None

-

None

-

Queries the NIC list in a container.

-

None

-

add-route

-

{

-

"dest":"172.17.10.10/24",

-

"gateway":"",

-

"device":"eth1"

-

}

-

dest

-

Sets the network segment corresponding to the route.

-

The value is in the format of <ip>/<mask>. <ip> is mandatory.

-

There are three cases:

-

1. Both IP address and mask are configured.

-

2. If only an IP address is configured, the default mask is 32.

-

3. If "dest":"default" is configured, there is no destination by default. In this case, the gateway needs to be configured.

-

gateway

-

Sets the next-hop gateway of the route.

-

When "dest":"default" is configured, the gateway is mandatory. In other cases, this parameter is optional.

-

device

-

Sets the name of the NIC corresponding to the route.

-

Mandatory.

-

The value contains a maximum of 15 characters.

-

del-route

-

{

-

"dest":"172.17.10.10/24"

-

}

-

None

-

Deletes a container routing rule.

-

dest is mandatory, and both device and gateway are optional.

-
NOTE:

Kata performs fuzzy match based on different fields and deletes the corresponding routing rules.

-
-

list-routes

-

None

-

None

-

Queries the route list in a container.

-

None

-
- -**Table 2** kata-ipvs command line interfaces - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Subcommand

-

Field

-

Parameter

-

Sub-parameter

-

Description

-

Remarks

-

kata-ipvs

-

ipvsadm

-

--parameters

-

-A, --add-service

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

Example:

-
kata-runtime kata-ipvs ipvsadm --parameters "--add-service --tcp-service 172.17.0.7:80 --scheduler rr --persistent 3000" <container-id>
-

-s, --scheduler

-

Load balancing scheduling algorithm.

-

Mandatory. Value range: rr|wrr|lc|wlc|lblc|lblcr|dh|sh|sed|nq.

-

-p, --persistent

-

Service duration.

-

Mandatory. The value ranges from 1 to 2678400, in seconds.

-

-E, --edit-service

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

-s, --scheduler

-

Load balancing scheduling algorithm.

-

Mandatory. Value range: rr|wrr|lc|wlc|lblc|lblcr|dh|sh|sed|nq.

-

-p, --persistent

-

Service duration.

-

Mandatory. The value ranges from 1 to 2678400, in seconds.

-

-D, --delete-service

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

-a, --add-server

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

Example:

-
kata-runtime kata-ipvs ipvsadm --parameters "--add-server --tcp-service 172.17.0.7:80 --real-server 172.17.0.4:80 --weight 100" <container-id>
-

-r, --real-server

-

Real server address.

-

Mandatory. The format is ip:port. The value of port ranges from 1 to 65535.

-

-w, --weight

-

Weight

-

Optional. The value ranges from 0 to 65535.

-

-e, --edit-server

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

-r, --real-server

-

Real server address.

-

Mandatory. The format is ip:port. The value of port ranges from 1 to 65535.

-

-w, --weight

-

Weight

-

Optional. The value ranges from 0 to 65535.

-

-d, --delete-server

-

-t, --tcp-service

-

-u, --udp-service

-

Virtual service type.

-

Mandatory. You can select --tcp-service or --udp-service. The format is ip:port. The value of port ranges from 1 to 65535.

-

-r, --real-server

-

Real server address.

-

Mandatory. The format is ip:port. The value of port ranges from 1 to 65535.

-

-L, --list

-

-t, --tcp-service

-

-u, --udp-service

-

Queries virtual service information.

-

Optional.

-

Example:

-
kata-runtime kata-ipvs ipvsadm --parameters "--list --tcp-service ip:port" <container-id>
-

--set

-

--tcp

-

TCP timeout.

-

Mandatory. The value ranges from 0 to 1296000.

-

Example:

-
kata-runtime kata-ipvs ipvsadm --parameters "--set 100 100 200" <container-id>
-

--tcpfin

-

TCP FIN timeout.

-

Mandatory. The value ranges from 0 to 1296000.

-

--udp

-

UDP timeout.

-

Mandatory. The value ranges from 0 to 1296000.

-

--restore

-

-

-

Imports standard inputs in batches.

-

Rule files can be specified.

-

Example:

-
kata-runtime kata-ipvs ipvsadm --restore - < <rule file path> <container-id>
-
NOTE:

By default, the NAT mode is used for adding a single real server. To add real servers in batches, you need to manually add the -m option to use the NAT mode.

-

The following is an example of the rule file content:

-

-A -t 10.10.11.12:100 -s rr -p 3000

-

-a -t 10.10.11.12:100 -r 172.16.0.1:80 -m

-

-a -t 10.10.11.12:100 -r 172.16.0.1:81 -m

-

-a -t 10.10.11.12:100 -r 172.16.0.1:82 -m

-
-

cleanup

-

--parameters

-

-d, --orig-dst

-

Specifies the IP address.

-

Mandatory.

-

Example:

-
kata-runtime kata-ipvs cleanup --parameters "--orig-dst 172.17.0.4 --protonum tcp" <container-id>
-

-p, --protonum

-

Protocol type.

-

Mandatory. The value can be tcp or udp.

-
- diff --git a/docs/en/docs/Container/appendix.md b/docs/en/docs/Container/appendix.md deleted file mode 100644 index eae445ef363dee1bdff53655cf276259a7500811..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/appendix.md +++ /dev/null @@ -1,713 +0,0 @@ -# Appendix -- [Appendix](#appendix) - - [Command Line Parameters](#command-line-parameters) - - [CNI Parameters](#cni-parameters) - -## Command Line Parameters - -**Table 1** login command parameters - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

login

-

  

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-p, --password

-

Specifies the password for logging in to the registry.

-

--password-stdin

-

Specifies the password for obtaining the registry from standard input.

-

-u, --username

-

Specifies the username for logging in to the registry.

-
- -**Table 2** logout command parameters - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

logout

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -**Table 3** pull command parameters - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

pull

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -**Table 4** rmi command parameters - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

rmi

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-f, --force

-

Forcibly removes an image.

-
- -**Table 5** load command parameters - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

load

-

-H, --host (supported only by iSula)

-

Specifies the iSulad socket file path to be accessed.

-

-i, --input

-

Specifies where to import an image. If the image is of the docker type, the value is the image package path. If the image is of the embedded type, the value is the image manifest path.

-

--tag

-

Uses the image name specified by TAG instead of the default image name. This parameter is supported when the type is set to docker.

-

-t, --type

-

Specifies the image type. The value can be embedded or docker (default value).

-
- -**Table 6** images command parameters - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

images

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-q, --quiet

-

Displays only the image name.

-
- -**Table 7** inspect command parameters - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

inspect

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-f, --format

-

Outputs using a template.

-

-t, --time

-

Timeout interval, in seconds. If the inspect command fails to query container information within the specified period, the system stops waiting and reports an error immediately. The default value is 120s. If the value is less than or equal to 0, the inspect command keeps waiting until the container information is obtained successfully.

-
- -## CNI Parameters - -**Table 1** CNI single network parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Type

-

Mandatory or Not

-

Description

-

cniVersion

-

string

-

Yes

-

CNI version. Only 0.3.0 and 0.3.1 are supported.

-

name

-

string

-

Yes

-

Network name, which is user-defined and must be unique.

-

type

-

string

-

Yes

-

Network type. The following types are supported:

-

underlay_ipvlan

-

overlay_l2

-

underlay_l2

-

vpc-router

-

dpdk-direct

-

phy-direct

-

ipmasq

-

bool

-

No

-

Configures the IP masquerade.

-

ipam

-

structure

-

No

-

For details, see the IPAM parameter definition.

-

ipam.type

-

string

-

No

-

IPAM type. The following types are supported:

-

(1) For underlay_l2, overlay_l2, and vpc-router networking, only the default value distributed_l2 is supported.

-

(2) For underlay_ipvlan networking, the default value is distributed_l2. In the CCN scenario, only null and fixed are supported. In the CCE and FST 5G core scenarios, only null and distributed_l2 are supported.

-

(3) For phy-direct and dpdk-direct networking, the default value is l2, and optional values are null and distributed_l2. In the FST 5G core scenario, only null and distributed_l2 are supported.

-

Description:

-

If the value is out of the range (for example, host-local), Canal automatically sets the value to the default value and no error is returned.

-

null: Canal is not used to manage IP addresses.

-

fixed: fixed IP address, which is used in the CCN scenario.

-

l2: This value is not used in any scenario.

-

distributed_l2: The distributed small subnet is used to manage IP addresses.

-

ipam.subnet

-

string

-

No

-

Subnet information. Canal supports the subnet mask ranging from 8 to 29. The IP address cannot be a multicast address (for example, 224.0.0.0/4), reserved address (240.0.0.0/4), local link address (169.254.0.0/16), or local loop address (127.0.0.0/8).

-

ipam.gateway

-

string

-

No

-

Gateway IP address.

-

ipam.range-start

-

string

-

No

-

Available start IP address.

-

ipam.range-end

-

string

-

No

-

Available end IP address.

-

ipam.routes

-

structure

-

No

-

Subnet list. Each element is a route dictionary. For details, see the route definition.

-

ipam.routes.dst

-

string

-

No

-

Destination network.

-

ipam.routes.gw

-

string

-

No

-

Gateway address.

-

dns

-

structure

-

No

-

Contains some special DNS values.

-

dns.nameservers

-

[]string

-

No

-

NameServers

-

dns.domain

-

string

-

No

-

Domain

-

dns.search

-

[]string

-

No

-

Search

-

dns.options

-

[]string

-

No

-

Options

-

multi_entry

-

int

-

No

-

Number of IP addresses required by a vNIC. The value ranges from 0 to 16. For physical passthrough, a maximum of 128 IP addresses can be applied for a single NIC.

-

backup_mode

-

bool

-

No

-

Active/Standby mode, which is used only for phy-direct and dpdk-direct networking.

-

vlanID

-

int

-

No

-

The value ranges from 0 to 4095. It can be specified through PaaS.

-

vlan_inside

-

bool

-

No

-

The value true indicates that the VLAN function is implemented internally on the node, and the value false indicates that the VLAN function is implemented externally.

-

vxlanID

-

int

-

No

-

The value ranges from 0 to 16777215. It can be specified through PaaS.

-

vxlan_inside

-

bool

-

No

-

The value true indicates that the VXLAN function is implemented internally on the node, and the value false indicates that the VXLAN function is implemented externally.

-

action

-

string

-

No

-

This parameter can be used only with the special container ID 000000000000.

-

Create: creates a network.

-

Delete: deletes a network.

-

args

-

map[string]interface{}

-

No

-

Key-value pair type.

-

runtimeConfig

-

structure

-

No

-

None

-

capabilities

-

structure

-

No

-

None

-
- -**Table 2** CNI args parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Type

-

Mandatory

-

Description

-

K8S_POD_NAME

-

string

-

No

-

Set this parameter when you apply for a fixed IP address (runtimeConfig.ican_caps.fixed_ip is set to true).

-

K8S_POD_NAMESPACE

-

string

-

No

-

Set this parameter when you apply for a fixed IP address (runtimeConfig.ican_caps.fixed_ip is set to true).

-

SECURE_CONTAINER

-

string

-

No

-

Secure container flag.

-

multi_port

-

int

-

No

-

The value ranges from 1 to 8. The default value is 1. Specifies the number of passthrough NICs. Only phy-direct and dpdk-direct networks are supported.

-

phy-direct

-

string

-

No

-

Specifies the NIC to be connected when you create an SR-IOV container network.

-

dpdk-direct

-

string

-

No

-

Specifies the NIC to be connected when you create a DPDK passthrough container network.

-

tenant_id

-

string

-

No

-

Indicates the tenant ID.

-

Only vpc-router networks are supported.

-

vpc_id

-

string

-

No

-

VPC ID.

-

Only vpc-router networks are supported.

-

secret_name

-

string

-

No

-

Specifies the AK/SK object name on the K8S APIServer.

-

Only vpc-router networks are supported.

-

For details, see the configuration of VPC-Router logical networks.

-

IP

-

string

-

No

-

IP address specified by the user, in the format of 192.168.0.10.

-

K8S_POD_NETWORK_ARGS

-

string

-

No

-

Specifies an IP address, in the format of 192.168.0.10. If both IP and K8S_POD_NETWORK_ARGS in args are not empty, the value of K8S_POD_NETWORK_ARGS prevails.

-

INSTANCE_NAME

-

string

-

No

-

INSTANCE ID.

-

Refer to fixed IP addresses that support containers.

-

dist_gateway_disable

-

bool

-

No

-

The value true indicates that no gateway is created, and the value false indicates that a gateway is created.

-

phynet

-

string or []string

-

No

-

Specifies the name of the physical plane to be added. The physical plane name is predefined and corresponds to that in the SNC system. When two plane names are entered, the active and standby planes are supported. Example: phy_net1 or ["phy_net2","phy_net3"]

-

endpoint_policies

-

struct

-

No

-

"endpoint_policies": [

-

{

-

"Type": "",

-

"ExceptionList": [

-

""

-

],

-

"NeedEncap": true,

-

"DestinationPrefix": ""

-

}

-

]

-

port_map

-

struct

-

No

-

On a NAT network, container ports can be advertised to host ports.

-

"port_map": [

-

{

-

"local_port": number,

-

"host_port": number,

-

"protocol": [string...]

-

}...

-

]

-
- -**Table 3** CNI multiple network parameters - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Type

-

Mandatory

-

Description

-

cniVersion

-

string

-

Yes

-

CNI version. Only 0.3.0 and 0.3.1 are supported.

-

name

-

string

-

Yes

-

Network name, which is user-defined and must be unique.

-

plugins

-

struct

-

Yes

-

For details, see CNI single network parameters

-
- diff --git a/docs/en/docs/Container/application-scenarios-2.md b/docs/en/docs/Container/application-scenarios-2.md deleted file mode 100644 index 5346a100ee36da8190c476a4751e0d414e8ca2ec..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/application-scenarios-2.md +++ /dev/null @@ -1,4 +0,0 @@ -# Application Scenarios - -This section describes how to use a secure container. - diff --git a/docs/en/docs/Container/application-scenarios.md b/docs/en/docs/Container/application-scenarios.md deleted file mode 100644 index fe74c96c762fd08199445dbda6c552d38dcce197..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/application-scenarios.md +++ /dev/null @@ -1,5 +0,0 @@ -# Application Scenarios - -This section describes how to use the iSulad. - - diff --git a/docs/en/docs/Container/checking-the-container-health-status.md b/docs/en/docs/Container/checking-the-container-health-status.md deleted file mode 100644 index 12a1d0f114d15b87af2edfd5cd09bac43b43a170..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/checking-the-container-health-status.md +++ /dev/null @@ -1,71 +0,0 @@ -# Checking the Container Health Status - -- [Checking the Container Health Status](#checking-the-container-health-status) - - [Scenarios](#scenarios-7) - - [Configuration Methods](#configuration-methods) - - [Check Rules](#check-rules) - - [Usage Restrictions](#usage-restrictions-8) - - - -## Scenarios - -In the production environment, bugs are inevitable in applications provided by developers or services provided by platforms. Therefore, a management system is indispensable for periodically checking and repairing applications. The container health check mechanism adds a user-defined health check function for containers. When a container is created, the **--health-cmd** option is configured so that commands are periodically executed in the container to monitor the health status of the container based on return values. 
- -## Configuration Methods - -Configurations during container startup: - -``` -isula run -itd --health-cmd "echo iSulad >> /tmp/health_check_file || exit 1" --health-interval 5m --health-timeout 3s --health-exit-on-unhealthy busybox bash -``` - -The configurable options are as follows: - -- **--health-cmd**: This option is mandatory. If **0** is returned after a command is run in a container, the command execution succeeds. If a value other than **0** is returned, the command execution fails. -- **--health-interval**: interval between two consecutive command executions. The default value is **30s**. The value ranges from **1s** to the maximum value of Int64 \(unit: nanosecond\). If the input parameter is set to **0s**, the default value is used. -- **--health-timeout**: maximum duration for executing a single check command. If the execution times out, the command execution fails. The default value is **30s**. The value ranges from **1s** to the maximum value of Int64 \(unit: nanosecond\). If the input parameter is set to **0s**, the default value is used. Only containers whose runtime is of the LCR type are supported. -- **--health-start-period**: container initialization time. The default value is **0s**. The value ranges from **1s** to the maximum value of Int64 \(unit: nanosecond\). -- **--health-retries**: maximum number of retries for the health check. The default value is **3**. The maximum value is the maximum value of Int32. -- **--health-exit-on-unhealthy**: specifies whether to kill a container when it is unhealthy. The default value is **false**. - -## Check Rules - -1. After a container is started, the container status is **health:starting**. -2. After the period specified by **start-period**, the **cmd** command is periodically executed in the container at the interval specified by **interval**. That is, after the command is executed, the command will be executed again after the specified period. -3. 
If the **cmd** command is successfully executed within the time specified by **timeout** and the return value is **0**, the check is successful. Otherwise, the check fails. If the check is successful, the container status changes to **health:healthy**. -4. If the **cmd** command fails to be executed for the number of times specified by **retries**, the container status changes to **health:unhealthy**, and the container continues the health check. -5. When the container status is **health:unhealthy**, the container status changes to **health:healthy** if a check succeeds. -6. If **--exit-on-unhealthy** is set, and the container exits due to reasons other than being killed \(the returned exit code is **137**\), the health check takes effect only after the container is restarted. -7. When the **cmd** command execution is complete or times out, Docker daemon will record the start time, return value, and standard output of the check to the configuration file of the container. A maximum of five records can be recorded. In addition, the configuration file of the container stores health check parameters. -8. When the container is running, the health check status is written into the container configurations. You can run the **isula inspect** command to view the status. - -``` -"Health": { - "Status": "healthy", - "FailingStreak": 0, - "Log": [ - { - "Start": "2018-03-07T07:44:15.481414707-05:00", - "End": "2018-03-07T07:44:15.556908311-05:00", - "ExitCode": 0, - "Output": "" - }, - { - "Start": "2018-03-07T07:44:18.557297462-05:00", - "End": "2018-03-07T07:44:18.63035891-05:00", - "ExitCode": 0, - "Output": "" - }, - ...... -} -``` - -## Usage Restrictions - -- A maximum of five health check status records can be stored in a container. The last five records are saved. -- If health check parameters are set to **0** during container startup, the default values are used. 
-- After a container with configured health check parameters is started, if iSulad daemon exits, the health check is not executed. After iSulad daemon is restarted, the health status of the running container changes to **starting**. Afterwards, the check rules are the same as above. -- If the health check fails for the first time, the health check status will not change from **starting** to **unhealthy** until the specified number of retries \(**--health-retries**\) is reached, or to **healthy** until the health check succeeds. -- The health check function of containers whose runtime is of the Open Container Initiative \(OCI\) type needs to be improved. Only containers whose runtime is of the LCR type are supported. - diff --git a/docs/en/docs/Container/command-reference.md b/docs/en/docs/Container/command-reference.md deleted file mode 100644 index d7578e549bf15d14f944ccd1fda94911fead4829..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/command-reference.md +++ /dev/null @@ -1,7 +0,0 @@ -# Command Reference - -- [Command Reference](#command-reference) - - [Container Engine](#container-engine) - - [Container Management](#container-management-40) - - [Image Management](#image-management-43) - - [Statistics](#statistics) diff --git a/docs/en/docs/Container/configurable-cgroup-path.md b/docs/en/docs/Container/configurable-cgroup-path.md deleted file mode 100644 index d5d24d9d0b195d249fc536e32f5022bfa58f0e39..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/configurable-cgroup-path.md +++ /dev/null @@ -1,99 +0,0 @@ -# Configurable Cgroup Path - -- [Configurable Cgroup Path](#configurable-cgroup-path) - - -## Function Description - -System containers provide the capabilities of isolating and reserving container resources on hosts. You can use the **--cgroup-parent** parameter to specify the cgroup directory used by a container to another directory, thereby flexibly allocating host resources. 
For example, if the cgroup parent path of containers A, B, and C is set to **/lxc/cgroup1**, and the cgroup parent path of containers D, E, and F is set to **/lxc/cgroup2**, the containers are divided into two groups through the cgroup paths, implementing resource isolation at the cgroup level. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--cgroup-parent

-
  • Variable of the string type.
  • Specifies the cgroup parent path of the container.
-
- -In addition to specifying the cgroup parent path for a system container using commands, you can also specify the cgroup paths of all containers by modifying the startup configuration files of the iSulad container engine. - - - - - - - - - - - - -

Configuration File Path

-

Parameter

-

Description

-

/etc/isulad/daemon.json

-

--cgroup-parent

-
  • Variable of the string type.
  • Specifies the default cgroup parent path of the container.
  • Example: "cgroup-parent": "/lxc/mycgroup"
-
- -## Constraints - -- If the **cgroup parent** parameter is set on both the daemon and client, the value specified on the client takes effect. -- If container A is started before container B, the cgroup parent path of container B is specified as the cgroup path of container A. When deleting a container, you need to delete container B and then container A. Otherwise, residual cgroup resources exist. - -## Example - -Start a system container and specify the **--cgroup-parent** parameter. - -``` -[root@localhost ~]# isula run -tid --cgroup-parent /lxc/cgroup123 --system-container --external-rootfs /root/myrootfs none init -115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -``` - -Check the cgroup information of the init process in the container. - -``` -[root@localhost ~]# isula inspect -f "{{json .State.Pid}}" 11 -22167 -[root@localhost ~]# cat /proc/22167/cgroup -13:blkio:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -12:perf_event:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -11:cpuset:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -10:pids:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -9:rdma:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -8:devices:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -7:hugetlb:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -6:memory:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -5:net_cls,net_prio:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -4:cpu,cpuacct:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -3:files:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -2:freezer:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e 
-1:name=systemd:/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e/init.scope -0::/lxc/cgroup123/115878a4dfc7c5b8c62ef8a4b44f216485422be9a28f447a4b9ecac4609f332e -``` - -The cgroup parent path of the container is set to **/sys/fs/cgroup/**__**/lxc/cgroup123**. - -In addition, you can configure the container daemon file to set the cgroup parent paths for all containers. For example: - -``` -{ - "cgroup-parent": "/lxc/cgroup123", -} -``` - -Restart the container engine for the configuration to take effect. - diff --git a/docs/en/docs/Container/configuring-networking-for-a-secure-container.md b/docs/en/docs/Container/configuring-networking-for-a-secure-container.md deleted file mode 100644 index 001355c89d3ee5d8426dfd8bb199310105275cfb..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/configuring-networking-for-a-secure-container.md +++ /dev/null @@ -1,346 +0,0 @@ -# Configuring Networking for a Secure Container - -- [Configuring Networking for a Secure Container](#configuring-networking-for-a-secure-container) - - -## TAP-based Network Support - -The secure container technology is implemented based on QEMU VMs. For a physical machine system, a secure container is equivalent to a VM. Therefore, the secure container may connect the VM to an external network in the Neutron network by using the test access point \(TAP\) technology. You do not need to pay attention to TAP device creation and bridging. You only need to hot add the specified TAP device \(with an existing host\) to the VM in the pause container and update the NIC information. - -Related commands are as follows: - -1. **Run the following command to add a TAP NIC for a started container:** - - ``` - $ cat ./test-iface.json | kata-runtime kata-network add-iface 6ec7a98 - - ``` - - In the preceding command, **6ec7a98** is the truncated container ID, and **test-infs.json** is the file that describes the NIC information. 
The following is an example: - - ``` - { - "device": "tap-test", - "name": "eth-test", - "IPAddresses": [ - { - "address": "172.16.0.3", - "mask": "16" - } - ], - "hwAddr":"02:42:20:6f:a3:69", - "mtu": 1500, - "vhostUserSocket":"/usr/local/var/run/openvswitch/vhost-user1", - "queues":5 - } - ``` - - The fields in the JSON file are described as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Field

-

Mandatory/Optional

-

Description

-

device

-

Mandatory

-

Name of the NIC on a host. The value can contain a maximum of 15 characters, including letters, digits, underscores (_), hyphens (-), and periods (.). It must start with a letter. The device name must be unique on the same host.

-

name

-

Mandatory

-

Name of the NIC in the container. The value can contain a maximum of 15 characters, including letters, digits, underscores (_), hyphens (-), and periods (.). It must start with a letter. The name must be unique in the same sandbox.

-

IPAddresses

-

Optional

-

IP address of the NIC. Currently, one IP address can be configured for each NIC. If no IP address is configured for the NIC, no IP address will be configured in the container, either.

-

hwAddr

-

Mandatory

-

MAC address of the NIC.

-

mtu

-

Mandatory

-

MTU of the NIC. The value ranges from 46 to 9600.

-

vhostUserSocket

-

Optional

-

Socket path for DPDK polling. The path contains a maximum of 128 bytes. The naming rule can contain digits, letters, and hyphens (-). The path name must start with a letter.

-

queues

-

Optional

-

Number of NIC queues. If this parameter is not set, the default value 0 is used.

-
- - The following describes the output of the **kata-runtime kata-network add-iface** command for adding NICs: - - - If the command is successfully executed, the NIC information in JSON format is returned from **standard output \(stdout\)**. The content in JSON format is the same as the input NIC information. - - Example: - - ``` - $ kata-runtime kata-network add-iface net.json - {"device":"tap_test","name":"eth-test","IPAddresses":[{"Family":2,"Address":"173.85.100.1","Mask":"24"}],"mtu":1500,"hwAddr":"02:42:20:6e:03:01","pciAddr":"01.0/00"} - ``` - - - If the command fails to be executed, null is returned from **stdout**. - - Example: - - ``` - $ kata-runtime kata-network add-iface netbad.json 2>/dev/null - null - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >If an IP address is specified for an NIC that is successfully added, Kata adds a default route whose destination is in the same network segment as the IP address of the NIC. In the preceding example, after the NIC is added, the following route is added to the container: - >``` - >[root@6ec7a98 /]# ip route - >172.16.0.0/16 dev eth-test proto kernel scope link src 172.16.0.3 - >``` - -2. **Run the following command to view the added NICs:** - - ``` - $ kata-runtime kata-network list-ifaces 6ec7a98 - [{"name":"eth-test","mac":"02:42:20:6f:a3:69","ip":["172.16.0.3/16"],"mtu":1500}] - ``` - - The information about the added NICs is displayed. - - The following describes the output of the **kata-runtime kata-network list-ifaces **command for listing added NICs: - - - If the command is executed successfully, information about all NICs inserted into the pod in JSON format is returned from **stdout**. - - If multiple NICs are inserted into the pod, the NIC information in JSON array format is returned. 
- - ``` - $ kata-runtime kata-network list-ifaces - [{"name":"container_eth","mac":"02:42:20:6e:a2:59","ip":["172.17.25.23/8"],"mtu":1500},{"name":"container_eth_2","mac":"02:90:50:6b:a2:29","ip":["192.168.0.34/24"],"mtu":1500}] - ``` - - If no NIC is inserted into the pod, null is returned from **stdout**. - - ``` - $ kata-runtime kata-network list-ifaces - null - ``` - - - If the command fails to be executed, null is returned from **stdout**, and error description is returned from **standard error \(stderr\)**. - - Example: - - ``` - $ kata-runtime kata-network list-ifaces - null - ``` - -3. **Add a route for a specified NIC.** - - ``` - $ cat ./test-route.json | kata-runtime kata-network add-route 6ec7a98 - - [{"dest":"default","gateway":"172.16.0.1","device":"eth-test"}] - ``` - - The following describes the output of the **kata-runtime kata-network add-route** command for adding a route to a specified NIC: - - - If the command is executed successfully, the added route information in JSON format is returned from **stdout**. - - Example: - - ``` - $ kata-runtime kata-network add-route route.json - [{"dest":"177.17.0.0/24","gateway":"177.17.25.1","device":"netport_test_1"}] - ``` - - - If the command fails to be executed, null is returned from **stdout**, and error description is returned from **standard error \(stderr\)**. - - Example: - - ``` - $ kata-runtime kata-network add-route routebad.json 2>/dev/null - null - ``` - - Key fields are described as follows: - - - **dest**: Network segment corresponding to the route. The value is in the format of <_ip_\>/<_mask_\>. <_ip_\> is mandatory. There are three cases: - 1. Both IP address and mask are configured. - 2. If only an IP address is configured, the default mask is 32. - 3. If **"dest":"default"** is configured, there is no destination by default. In this case, the gateway needs to be configured. - - - **gateway**: Next-hop gateway of the route. 
When **"dest":"default"** is configured, the gateway is mandatory. In other cases, this parameter is optional. - - **device**: Name of the NIC corresponding to the route, which is mandatory. The value contains a maximum of 15 characters. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >If a route is added for the loopback device **lo** in the container, the device name corresponding to the **device** field in the route configuration file is **lo**. - -4. **Run the following command to delete a specified route:** - - ``` - $ cat ./test-route.json | kata-runtime kata-network del-route 6ec7a98 - - ``` - - The fields in the **test-route.json** file are the same as those in the JSON file for adding a route. - - The following describes the output of the** kata-runtime kata-network del-route** command for deleting a specified route: - - - If the command is executed successfully, the added route information in JSON format is returned from **stdout**. - - Example: - - ``` - $ kata-runtime kata-network del-route route.json - [{"dest":"177.17.0.0/24","gateway":"177.17.25.1","device":"netport_test_1"}] - ``` - - - If the command fails to be executed, null is returned from **stdout**, and error description is returned from **standard error \(stderr\)**. - - Example: - - ``` - $ kata-runtime kata-network del-route routebad.json 2>/dev/null - null - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- In the input fields, **dest** is mandatory, and both **device** and **gateway** are optional. Kata performs fuzzy match based on different fields and deletes the corresponding routing rules. For example, if **dest** is set to an IP address, all rules of this IP address will be deleted. - >- If the route of the loopback device **lo** in the container is deleted, the device name corresponding to the **device** field in the route configuration file is **lo**. - -5. 
**Run the following command to delete an NIC:** - - ``` - $ cat ./test-iface.json | kata-runtime kata-network del-iface 6ec7a98 - - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >When deleting an NIC, you can only delete it based on the **name** field in the NIC container. Kata does not identify other fields. - - The following describes the output of the **kata-runtime kata-network del-iface **command for deleting NICs: - - - If the command is executed successfully, null is returned from **stdout**. - - Example: - - ``` - $ kata-runtime kata-network del-iface net.json - null - ``` - - - If the command fails to be executed, the information about NICs that fail to be deleted in JSON format is returned from **stdout**, and error description is returned from **stderr**. - - Example: - - ``` - $ kata-runtime kata-network del-iface net.json - {"device":"tapname_fun_012","name":"netport_test_1","IPAddresses":[{"Family":0,"Address":"177.17.0.1","Mask":"8"}],"mtu":1500,"hwAddr":"02:42:20:6e:a2:59","linkType":"tap"} - ``` - - - -The preceding are common commands. For details about the command line interfaces, see [APIs](#apis-32.md#EN-US_TOPIC_0184808188). - -## Kata IPVS Subsystem - -The secure container provides an API for adding the **ipvs** command and setting the IPVS rule for the container. The functions include adding, editing, and deleting virtual services, adding, editing, and deleting real servers, querying IPVS service information, setting connection timeout, clearing the system connection cache, and importing rules in batches. - -1. Add a virtual service address for the container. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--add-service --tcp-service 172.17.0.7:80 --scheduler rr --persistent 3000" - ``` - -2. Modify virtual service parameters of a container. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--edit-service --tcp-service 172.17.0.7:80 --scheduler rr --persistent 5000" - ``` - -3. 
Delete the virtual service address of a container. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--delete-service --tcp-service 172.17.0.7:80" - ``` - -4. Add a real server for the virtual service address. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--add-server --tcp-service 172.17.0.7:80 --real-server 172.17.0.4:80 --weight 100" - ``` - -5. Modify real server parameters of a container. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--edit-server --tcp-service 172.17.0.7:80 --real-server 172.17.0.4:80 --weight 200" - ``` - -6. Delete a real server from a container. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--delete-server --tcp-service 172.17.0.7:80 --real-server 172.17.0.4:80" - ``` - -7. Query service information. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--list" - ``` - -8. It takes a long time to import rules one by one. You can write rules into a file and import them in batches. - - ``` - kata-runtime kata-ipvs ipvsadm --restore - < - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >By default, the NAT mode is used for adding a single real server. To add real servers in batches, you need to manually add the **-m** option to use the NAT mode. - >The following is an example of the rule file content: - >-A -t 10.10.11.12:100 -s rr -p 3000 - >-a -t 10.10.11.12:100 -r 172.16.0.1:80 -m - >-a -t 10.10.11.12:100 -r 172.16.0.1:81 -m - >-a -t 10.10.11.12:100 -r 172.16.0.1:82 -m - -9. Clear the system connection cache. - - ``` - kata-runtime kata-ipvs cleanup --parameters "--orig-dst 172.17.0.4 --protonum tcp" - ``` - -10. Set timeout interval for TCP, TCP FIN, or UDP connections. - - ``` - kata-runtime kata-ipvs ipvsadm --parameters "--set 100 100 200" - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >1. Each container supports a maximum of 20000 iptables rules \(5000 services and three servers/services\). Both add-service and add-server are rules. - >2. 
Before importing rules in batches, you need to clear existing rules. - >3. No concurrent test scenario exists. - >4. The preceding are common commands. For details about the command line interfaces, see [APIs](#apis-32.md#EN-US_TOPIC_0184808188). - - diff --git a/docs/en/docs/Container/configuring-resources-for-a-secure-container.md b/docs/en/docs/Container/configuring-resources-for-a-secure-container.md deleted file mode 100644 index ded03af6d5b7f39ab1e55bb239fec3a7687b45f2..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/configuring-resources-for-a-secure-container.md +++ /dev/null @@ -1,341 +0,0 @@ -# Configuring Resources for a Secure Container - -- [Configuring Resources for a Secure Container](#configuring-resources-for-a-secure-container) - - [Sharing Resources](#sharing-resources) - - [Limiting CPU Resources](#limiting-cpu-resources) - - [Limiting Memory Resources](#limiting-memory-resources) - - [Limiting Block I/O Resources](#limiting-block-i-o-resources) - - [Limiting File Descriptor Resources](#limiting-file-descriptor-resources) - - -The secure container runs on a virtualized and isolated lightweight VM. Therefore, resource configuration is divided into two parts: resource configuration for the lightweight VM, that is, host resource configuration; resource configuration for containers in the VM, that is, guest container resource configuration. The following describes resource configuration for the two parts in detail. - - -## Sharing Resources - -Because the secure container runs on a virtualized and isolated lightweight VM, resources in some namespaces on the host cannot be accessed. Therefore, **--net host**, **--ipc host**, **--pid host**, and **--uts host** are not supported during startup. - -When a pod is started, all containers in the pod share the same net namespace and ipc namespace by default. If containers in the same pod need to share the pid namespace, you can use Kubernetes to configure the pid namespace. 
In Kubernetes 1.11, the pid namespace is disabled by default. - -## Limiting CPU Resources - -1. Configure CPU resources for running a lightweight VM. - - Configuring CPU resources of a lightweight VM is to configure the vCPUs for running the VM. The secure container uses **--annotation com.github.containers.virtcontainers.sandbox\_cpu** to configure the CPU resources for running the lightweight VM. This option can be configured only on the pause container. - - ``` - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox --annotation com.github.containers.virtcontainers.sandbox_cpu= - ``` - - Example: - - ``` - #Start a pause container. - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox --annotation com.github.containers.virtcontainers.sandbox_cpu=4 busybox sleep 999999 - be3255a3f66a35508efe419bc52eccd3b000032b9d8c9c62df611d5bdc115954 - - #Access the container and check whether the number of CPUs is the same as that configured in the com.github.containers.virtcontainers.sandbox_cpu file. - docker exec be32 lscpu - Architecture: aarch64 - Byte Order: Little Endian - CPU(s): 4 - On-line CPU(s) list: 0-3 - Thread(s) per core: 1 - Core(s) per socket: 1 - Socket(s): 4 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The maximum number of CPUs that can be configured is the number of CPUs \(excluding isolated cores\) that can run on the OS. The minimum number of CPUs is 0.5. - -2. Configure CPU resources for running a container. - - The method of configuring CPU resources for a container is the same as that for an open-source Docker container. You can configure CPU resources by setting the following parameters in the **docker run** command: - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--cpu-shares

-

Sets the percentage of CPU time that can be used by the container.

-

--cpus

-

Sets the number of CPUs that can be used by the container.

-

--cpu-period

-

Sets the scheduling period of the container process.

-

--cpu-quota

-

Sets the CPU time that can be used by the container process in a scheduling period.

-

--cpuset-cpus

-

Sets the list of CPUs that can be used by the container process.

-
NOTE:

When the secure container uses the --cpuset-cpus option to bind a CPU, the CPU ID cannot exceed the number of CPUs in the lightweight VM corresponding to the secure container minus 1. (The CPU ID in the lightweight VM starts from 0.)

-
-

--cpuset-mems

-

Sets the memory node that can be accessed by the container process.

-
NOTE:

Secure containers do not support the multi-NUMA architecture and configuration. The --cpuset-mems option of NUMA memory can only be set to 0.

-
-
- -3. Configure CPU hot swap. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The CPU hot swap function of the secure container requires the virtualization component QEMU. - - The **enable\_cpu\_memory\_hotplug** option in the kata-runtime configuration file **config.toml** is used to enable or disable CPU and memory hot swap. The default value is **false**, indicating that CPU and memory hot swap is disabled. If the value is **true**, CPU and memory hot swap is enabled. - - The **--cpus** option is reused in kata-runtime to implement the CPU hot swap function. The total number of **--cpus** options of all containers in a pod is calculated to determine the number of CPUs to be hot added to the lightweight VM. - - Example: - - ``` - #Start a pause container. By default, one vCPU is allocated to a lightweight VM. - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox busybox sleep 999999 - 77b40fb72f63b11dd3fcab2f6dabfc7768295fced042af8c7ad9c0286b17d24f - - #View the number of CPUs in the lightweight VM after the pause container is started. - docker exec 77b40fb72f6 lscpu - Architecture: x86_64 - CPU op-mode(s): 32-bit, 64-bit - Byte Order: Little Endian - CPU(s): 1 - On-line CPU(s) list: 0 - Thread(s) per core: 1 - Core(s) per socket: 1 - Socket(s): 1 - - #Start a new container in the same pod and run the --cpus command to set the number of CPUs required by the container to 4. - docker run -tid --runtime kata-runtime --network none --cpus 4 --annotation io.kubernetes.docker.type=container --annotation io.kubernetes.sandbox.id=77b40fb72f63b11dd3fcab2f6dabfc7768295fced042af8c7ad9c0286b17d24f busybox sleep 999999 - 7234d666851d43cbdc41da356bf62488b89cd826361bb71d585a049b6cedafd3 - - #View the number of CPUs in the current lightweight VM. 
- docker exec 7234d6668 lscpu - Architecture: x86_64 - CPU op-mode(s): 32-bit, 64-bit - Byte Order: Little Endian - CPU(s): 4 - On-line CPU(s) list: 0-3 - Thread(s) per core: 1 - Core(s) per socket: 1 - Socket(s): 4 - - #View the number of CPUs in the lightweight VM after deleting the container where CPUs are hot added. - docker rm -f 7234d666851d - 7234d666851d - - docker exec 77b40fb72f6 lscpu - Architecture: x86_64 - CPU op-mode(s): 32-bit, 64-bit - Byte Order: Little Endian - CPU(s): 1 - On-line CPU(s) list: 0 - Thread(s) per core: 1 - Core(s) per socket: 1 - Socket(s): 1 - ``` - -    - -    - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The pause container is only a placeholder container and does not have any workload. Therefore, when a lightweight VM is started, the CPU allocated by default can be shared by other containers. Therefore, you only need to hot add three CPUs to the lightweight VM for the new container started in the preceding example. - - - After the container where the CPU is hot added is stopped, the CPU is removed when the container is started. - - -## Limiting Memory Resources - -1. Configure memory resources for running a lightweight VM. - - Configuring the memory resources of a lightweight VM is to configure the memory for running the VM. The secure container uses **--annotation com.github.containers.virtcontainers.sandbox\_mem** to configure the memory resources for running the lightweight VM. This option can be configured only on the pause container. - - ``` - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox --annotation com.github.containers.virtcontainers.sandbox_mem= - ``` - - Example: - - ``` - #Start a pause container and use --annotation com.github.containers.virtcontainers.sandbox_mem=4G to allocate 4 GB memory to the lightweight VM. 
- docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox --annotation com.github.containers.virtcontainers.sandbox_mem=4G busybox sleep 999999 - 1532c3e59e7a45cd6b419aa1db07dd0069b0cdd93097f8944177a25e457e4297 - - #View the memory information of the lightweight VM and check whether the memory size is the same as that configured in the com.github.containers.virtcontainers.sandbox_mem file. - docker exec 1532c3e free -m - total used free shared buff/cache available - Mem: 3950 20 3874 41 55 3858 - Swap: 0 0 0 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- If the memory size of a lightweight VM is not set using **--annotation com.github.containers.virtcontainers.sandbox\_mem**, the lightweight VM uses 1 GB memory by default. - >- The minimum memory size of a pod in a secure container is 1 GB, and the maximum memory size is 256 GB. If the memory size allocated to a user exceeds 256 GB, an undefined error may occur. Currently, secure containers do not support the scenario where the memory size exceeds 256 GB. - -2. Configure memory resources for running a container. - - The method of configuring memory resources for running a container is the same as that for the open-source Docker container. You can configure memory resource limitation parameters in the **docker run** command. - - - - - - - - - - -

Parameter

-

Description

-

-m/--memory

-

Sets the memory size that can be used by the container process.

-
NOTE:
  • When memory hot swap is disabled, the value of -m must be less than or equal to the memory size allocated when the lightweight VM is started.
-
-
- -3. Configure memory hot add. - - The memory hot add function is also configured by the **enable\_cpu\_memory\_hotplug** option in the kata-runtime configuration file **config.toml**. For details, see [3](#limiting-cpu-resources.md#en-us_topic_0183903699_li2167326144011). - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Currently, memory resources support hot add only. - - The **-m** option is reused in kata-runtime to implement the memory hot add function. The sum of the **-m** options of all containers in a pod is collected to determine the number of memories to be hot added to a lightweight VM. - - Example: - - ``` - #Start a pause container. By default, 1 GB memory is allocated to the lightweight VM. - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox busybox sleep 999999 - 99b78508ada3fa7dcbac457bb0f6e3784e64e7f7131809344c5496957931119f - - #View the memory size of the lightweight VM after the pause container is started. - docker exec 99b78508ada free -m - total used free shared buff/cache available - Mem: 983 18 914 36 50 908 - Swap: 0 0 0 - - #Start a new container in the same pod and run the -m command to set the memory size required by the container to 4 GB. - docker run -tid --runtime kata-runtime --network none -m 4G --annotation io.kubernetes.docker.type=container --annotation io.kubernetes.sandbox.id=99b78508ada3fa7dcbac457bb0f6e3784e64e7f7131809344c5496957931119f busybox sleep 999999 - c49461745a712b2ef3127fdf43b2cbb034b7614e6060b13db12b7a5ff3c830c8 - - #View the memory size of the lightweight VM. - docker exec c49461745 free -m - total used free shared buff/cache available - Mem: 4055 69 3928 36 57 3891 - Swap: 0 0 0 - - #After deleting the container where the CPU is hot added, check the memory size of the lightweight VM. - docker rm -f c49461745 - c49461745 - - #The hot added memory does not support the hot add function. 
Therefore, after the hot added memory container is deleted from the lightweight VM, the memory is still 4 GB. - docker exec 99b78508ada free -m - total used free shared buff/cache available - Mem: 4055 69 3934 36 52 3894 - Swap: 0 0 0 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The pause container is only a placeholder container and does not have any workload. Therefore, the memory allocated to the lightweight VM during startup can be shared by other containers. You only need to hot add 3 GB memory to the lightweight VM for the new container started in the preceding example. - - -## Limiting Block I/O Resources - -1. Configure the block I/O resources for running a lightweight VM. - - To configure block I/O resources for running a lightweight VM of secure containers, use **--annotation com.github.containers.virtcontainers.blkio\_cgroup**. This option can be configured only on the pause container. - - ``` - docker run -tid --runtime --network none --annotation io.kubernetes.docker.type=podsandbox --annotation com.github.containers.virtcontainers.blkio_cgroup= - ``` - - The value of **--annotation com.github.containers.virtcontainers.blkio\_cgroup** must comply with the definition of the BlkioCgroup structure. 
- - ``` - // BlkioCgroup for Linux cgroup 'blkio' data exchange - type BlkioCgroup struct { - // Items specifies per cgroup values - Items []BlockIOCgroupItem `json:"blkiocgroup,omitempty"` - } - - type BlockIOCgroupItem struct { - // Path represent path of blkio device - Path string `json:"path,omitempty"` - // Limits specifies the blkio type and value - Limits []IOLimit `json:"limits,omitempty"` - } - - type IOLimit struct { - // Type specifies IO type - Type string `json:"type,omitempty"` - // Value specifies rate or weight value - Value uint64 `json:"value,omitempty"` - } - ``` - - The values of the **Type** field in the **IOLimit** structure body are as follows: - - ``` - // BlkioThrottleReadBps is the key to fetch throttle_read_bps - BlkioThrottleReadBps = "throttle_read_bps" - - // BlkioThrottleWriteBps is the key to fetch throttle_write_bps - BlkioThrottleWriteBps = "throttle_write_bps" - - // BlkioThrottleReadIOPS is the key to fetch throttle_read_iops - BlkioThrottleReadIOPS = "throttle_read_iops" - - // BlkioThrottleWriteIOPS is the key to fetch throttle_write_iops - BlkioThrottleWriteIOPS = "throttle_write_iops" - - // BlkioWeight is the key to fetch blkio_weight - BlkioWeight = "blkio_weight" - - // BlkioLeafWeight is the key to fetch blkio_leaf_weight - BlkioLeafWeight = "blkio_leaf_weight" - ``` - - Example: - - ``` - docker run -tid --runtime kata-runtime --network none --annotation com.github.containers.virtcontainers.blkio_cgroup='{"blkiocgroup":[{"path":"/dev/sda","limits":[{"type":"throttle_read_bps","value":400},{"type":"throttle_write_bps","value":400},{"type":"throttle_read_iops","value":700},{"type":"throttle_write_iops","value":699}]},{"limits":[{"type":"blkio_weight","value":78}]}]}' busybox sleep 999999 - ``` - - The preceding command is used to limit the block I/O traffic of the **/dev/sda** disk used by the started secure container by setting **throttle\_read\_bps** to 400 bit/s, **throttle\_write\_bps** to 400 bit/s, 
**throttle\_read\_iops** to 700 times/s, **throttle\_write\_iops** to 699 times/s, and the weight of the block I/O cgroup to 78. - - -## Limiting File Descriptor Resources - -To prevent the file descriptor resources on the host from being exhausted when a large number of files in the 9p shared directory are opened in the container, the secure container can customize the maximum number of file descriptors that can be opened by the QEMU process of the secure container. - -The secure container reuses the **--files-limit** option in the **docker run** command to set the maximum number of file descriptors that can be opened by the QEMU process of the secure container. This parameter can be configured only on the pause container. The usage method is as follows: - -``` -docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox --files-limit bash -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- If the value of **--files-limit** is less than the default minimum value **1024** and is not **0**, the maximum number of file descriptors that can be opened by the QEMU process of the secure container is set to the minimum value **1024**. ->- If the value of **--files-limit** is 0, the maximum number of file descriptors that can be opened by the QEMU process of the secure container is the default value obtained by dividing the maximum number of file descriptors that can be opened by the system \(/proc/sys/fs/file-max\) by 400. ->- If the maximum number of file descriptors that can be opened by the QEMU process of the secure container is not displayed when the secure container is started, the maximum number of file descriptors that can be opened by the QEMU process of the secure container is the same as the system default value. 
- diff --git a/docs/en/docs/Container/container-engine.md b/docs/en/docs/Container/container-engine.md deleted file mode 100644 index 5b8e754314ec95d9f920c49da86af5b650de6898..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container-engine.md +++ /dev/null @@ -1,310 +0,0 @@ -# Container Engine - -- [Container Engine](#container-engine) - - -Docker daemon is a system process that resides in the background. Before you run a docker subcommand, start Docker daemon. - -   - -If the Docker daemon is installed using the RPM package or system package management tool, you can run the **systemctl start docker** command to start the Docker daemon. - -The **docker** command supports the following parameters: - -1. To combine parameters of a single character, run the following command: - - ``` - docker run -t -i busybox /bin/sh - ``` - - The command can be written as follows: - - ``` - docker run -ti busybox /bin/sh - ``` - -2. **bool** command parameters such as **--icc=true**, are displayed in the command help. If this parameter is not used, the default value displayed in the command help is used. If this parameter is used, the opposite value of the value displayed in the command help is used. In addition, if **--icc** is not added when Docker daemon is started, **--icc=true** is used by default. Otherwise, **--icc=false** is used. -3. Parameters such as **--attach=\[\]** in the command help indicate that these parameters can be set for multiple times. For example: - - ``` - docker run --attach=stdin --attach=stdout -i -t busybox /bin/sh - ``` - -4. Parameters such as **-a** and **--attach=\[\]** in the command help indicate that the parameter can be specified using either **-a** _value_ or **--attach=**_value_. For example: - - ``` - docker run -a stdin --attach=stdout -i -t busybox /bin/sh - ``` - -5. Parameters such as **--name=""** can be configured with a character string and can be configured only once. 
Parameters such as **-c=** can be configured with an integer and can be configured only once. - -**Table 1** Parameters specified during the Docker daemon startup - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--api-cors-header

-

CORS headers for enabling remote API calls. Setting the CORS headers for the remote API allows upper-layer applications to perform secondary development against it.

-

--authorization-plugin=[]

-

Authentication plug-in.

-

-b, --bridge=""

-

Existing bridge device to which Docker containers are attached. Note: none can be used to disable networking in the container.

-

--bip=""

-

Bridge IP address, which is automatically created using the CIDR address. Note: this parameter cannot be used with -b .

-

--cgroup-parent

-

cgroup parent directory configured for all containers.

-

--config-file=/etc/docker/daemon.json

-

Configuration file for starting Docker daemon.

-

--containerd

-

Socket path of containerd.

-

-D, --debug=false

-

Specifies whether to enable the debugging mode.

-

--default-gateway

-

Default gateway of the container IPv4 address.

-

--default-gateway-v6

-

Default gateway of the container IPv6 address.

-

--default-ulimit=[]

-

Default ulimit value of the container.

-

--disable-legacy-registry

-

Disables the original registry.

-

--dns=[]

-

DNS server that the container is forced to use.

-

Example: --dns 8.8.x.x

-

--dns-opt=[]

-

DNS option.

-

--dns-search=[]

-

DNS search domain that the container is forced to use.

-

Example: --dns-search example.com

-

--exec-opt=[]

-

Parameter to be executed when a container is started.

-

For example, set the native.umask parameter.

-
#The umask value of the started container is 0022.--exec-opt native.umask=normal 
-#The umask value of the started container is 0027 (default value).
---exec-opt  native.umask=secure    
-

Note: If native.umask is also configured in docker create or docker run command, the configuration in command is used.

-

--exec-root=/var/run/docker

-

Root directory for storing the execution status file.

-

--fixed-cidr=""

-

Fixed IP address (for example, 10.20.0.0/16) of the subnet. The IP address of the subnet must belong to the network bridge.

-

--fixed-cidr-v6

-

Fixed IPv6 address.

-

-G, --group="docker"

-

Group assigned to the corresponding Unix socket in the background running mode. Note: When an empty string is configured for this parameter, the group information is removed.

-

-g, --graph="/var/lib/docker"

-

The root directory for running docker.

-

-H, --host=[]

-

Socket bound in background mode. One or more sockets can be configured using tcp://host:port, unix:///path to socket, fd://* or fd://socketfd. Example:

-

$ dockerd -H tcp://0.0.0.0:2375

-

or

-

$ export DOCKER_HOST="tcp://0.0.0.0:2375"

-

--insecure-registry=[]

-

Registry for insecure connections. By default, the Docker uses TLS certificates to ensure security for all connections. If the registry does not support HTTPS connections or the certificate is issued by an unknown certificate authority of the Docker daemon, you need to configure --insecure-registry=192.168.1.110:5000 when starting the daemon. This parameter needs to be configured if a private registry is used.

-

--image-layer-check=true

-

Image layer integrity check. To enable the function, set this parameter to true. Otherwise, set this parameter to false. If this parameter is not configured, the function is disabled by default.

-

When Docker is started, the image layer integrity is checked. If the image layer is damaged, the related images are unavailable. Docker cannot verify empty files, directories, or link files. Therefore, if the preceding files are lost due to a power failure, the integrity check of Docker image data may fail. When the Docker version changes, check whether the parameter is supported. If not supported, delete it from the configuration file.

-

--icc=true

-

Enables communication between containers.

-

--ip="0.0.0.0"

-

Default IP address used when a container is bound to a port.

-

--ip-forward=true

-

Enables the net.ipv4.ip_forward kernel setting for the container network.

-

--ip-masq=true

-

Enables IP masquerading.

-

--iptables=true

-

Starts the iptables rules defined by the Docker container.

-

-l, --log-level=info

-

Log level.

-

--label=[]

-

Daemon label, in key=value format.

-

--log-driver=json-file

-

Default log driver of container logs.

-

--log-opt=map[]

-

Log drive parameters.

-

--mtu=0

-

MTU value of the container network. If this parameter is not configured, value of route MTU is used by default. If the default route is not configured, set this parameter to the constant value 1500.

-

-p, --pidfile="/var/run/docker.pid"

-

PID file path of the background process.

-

--raw-logs

-

Logs with all timestamps and without the ANSI color scheme.

-

--registry-mirror=[]

-

Image registry preferentially used by the dockerd.

-

-s, --storage-driver=""

-

Storage driver that the daemon is forced to use when running containers.

-

--selinux-enabled=false

-

Enables SELinux. If the kernel version is 3.10.0-862.14 or later, this parameter cannot be set to true.

-

--storage-opt=[]

-

Storage driver parameter. This parameter is valid only when the storage driver is devicemapper. Example: dockerd --storage-opt dm.blocksize=512K

-

--tls=false

-

Enables the TLS authentication.

-

--tlscacert="/root/.docker/ca.pem"

-

Certificate file path that has been authenticated by the CA.

-

--tlscert="/root/.docker/cert.pem"

-

File path of the TLS certificates.

-

--tlskey="/root/.docker/key.pem"

-

File path of TLS keys.

-

--tlsverify=false

-

Verifies the communication between the background processes and the client using TLS.

-

--insecure-skip-verify-enforce

-

Whether to forcibly skip the verification of the certificate host or domain name. The default value is false.

-

--use-decrypted-key=true

-

Whether to use a decrypted private key.

-

--userland-proxy=true

-

Whether to use the userland proxy for the container loopback (lo) device.

-

--userns-remap

-

User namespace-based user mapping table in the container.

-
NOTE:

This parameter is not supported in the current version.

-
-
- diff --git a/docs/en/docs/Container/container-management-1.md b/docs/en/docs/Container/container-management-1.md deleted file mode 100644 index 0619fdd77430868ca53239aee2d6f4aea8cb2ec4..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container-management-1.md +++ /dev/null @@ -1,721 +0,0 @@ -## Container Management - -- [Container Management](#container-management-1) - - [Creating a Container](#creating-a-container) - - [Creating Containers Using hook-spec](#creating-containers-using-hook-spec) - - [Configuring Health Check During Container Creation](#configuring-health-check-during-container-creation) - - [Stopping and Deleting a Container](#stopping-and-deleting-a-container) - - [Querying Container Information](#querying-container-information) - - [Modification Operations](#modification-operations) - - - -## Creating a Container - -### Downloading Images - -Only user **root** can run the **docker** command. If you log in as a common user, you need to use the **sudo** command before running the **docker** command. - -``` -[root@localhost ~]# docker pull busybox -``` - -This command is used to download the **busybox:latest** image from the official Docker registry. \(If no tag is specified in the command, the default tag name **latest** is used.\) During the image download, the system checks whether the dependent layer exists locally. If yes, the image download is skipped. When downloading images from a private registry, specify the registry description. For example, if a private registry containing some common images is created and its IP address is **192.168.1.110:5000**, you can run the following command to download the image from the private registry: - -``` -[root@localhost ~]# docker pull 192.168.1.110:5000/busybox -``` - -The name of the image downloaded from the private registry contains the registry address information, which may be too long. Run the **docker tag** command to generate an image with a shorter name. 
- -``` -[root@localhost ~]# docker tag 192.168.1.110:5000/busybox busybox -``` - -Run the **docker images** command to view the local image list. - -### Running a Simple Application - -``` -[root@localhost ~]# docker run busybox /bin/echo "Hello world" -Hello world -``` - -This command uses the **busybox:latest** image to create a container, and executes the **echo "Hello world"** command in the container. Run the following command to view the created container: - -``` -[root@localhost ~]# docker ps -l -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -d8c0a3315bc0 busybox"/bin/echo 'Hello wo..." 5 seconds ago Exited (0) 3 seconds ago practical_franklin -``` - -### Creating an Interactive Container - -``` -[root@localhost ~]# docker run -it busybox /bin/bash -root@bf22919af2cf:/# ls -bin boot dev etc home lib media mnt opt proc root run sbin srv sys tmp usr var -root@bf22919af2cf:/# pwd -/ -``` - -The **-ti** option allocates a pseudo terminal to the container and uses standard input \(STDIN\) for interaction. You can run commands in the container. In this case, the container is an independent Linux VM. Run the **exit** command to exit the container. - -### Running a Container in the Background - -Run the following command. **-d** indicates that the container is running in the background. **--name=container1** indicates that the container name is **container1**. - -``` -[root@localhost ~]# docker run -d --name=container1 busybox /bin/sh -c "while true;do echo hello world;sleep 1;done" -7804d3e16d69b41aac5f9bf20d5f263e2da081b1de50044105b1e3f536b6db1c -``` - -The command output contains the container ID but does not contain **hello world**. In this case, the container is running in the background. You can run the **docker ps** command to view the running container. 
- -``` -[root@localhost ~]# docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -7804d3e16d69 busybox "/bin/sh -c 'while tr" 11 seconds ago Up 10 seconds container1 -``` - -Run the following **docker logs** command to view the output during container running: - -``` -[root@localhost ~]# docker logs container1 -hello world -hello world -hello world -... -``` - -### Container Network Connection - -By default, a container can access an external network, while port mapping is required when an external network accesses a container. The following uses how to run the private registry service in Docker as an example. In the following command, **-P** is used to expose open ports in the registry to the host. - -``` -[root@localhost ~]# docker run --name=container_registry -d -P registry -cb883f6216c2b08a8c439b3957fb396c847a99079448ca741cc90724de4e4731 -``` - -The container\_registry container has been started, but the mapping between services in the container and ports on the host is not clear. You need to run the **docker port** command to view the port mapping. - -``` -[root@localhost ~]# docker port container_registry -5000/tcp -> 0.0.0.0:49155 -``` - -The command output shows that port 5000 in the container is mapped to port 49155 on the host. You can access the registry service by using the host IP address **49155**. Enter **http://localhost:49155** in the address box of the browser and press **Enter**. The registry version information is displayed. - -When running registry images, you can directly specify the port mapping, as shown in the following: - -``` -docker run --name=container_registry -d -p 5000:5000 registry -``` - -**-p 5000:5000** is used to map port 5000 in the container to port 5000 on the host. - -### Precautions - -- **Do Not Add -a stdin Independently During Container Startup** - - When starting a container, you must add **-a stdout** or **-a stderr** together with **-a stdin** instead of **-a stdin** only. 
Otherwise, the device stops responding even after the container exits. - - -- **Do Not Use the Long Or Short ID of an Existing Container As the Name of a New Container** - - When creating a container, do not use the long or short ID of the existing container A as the name of the new container B. If the long ID of container A is used as the name of container B, Docker will match container A even though the name of container B is used as the specified target container for operations. If the short ID of container A is used as the name of container B, Docker will match container B even though the short ID of container A is used as the specified target container for operations. This is because Docker matches the long IDs of all containers first. If the matching fails, the system performs exact matching using the value of **container\_name**. If matching failure persists, the container ID is directly matched in fuzzy mode. - -- **Containers That Depend on Standard Input and Output, Such As sh/bash, Must Use the -ti Parameter to Avoid Exceptions** - - Normal case: If you do not use the **-ti** parameter to start a process container such as sh/bash, the container exits immediately. - - The cause of this problem is that Docker creates a stdin that matches services in the container first. If the interactive parameters such as **-ti** are not set, Docker closes pipe after the container is started and the service container process sh/bash exits after stdin is closed. - - Exception: If Docker daemon is forcibly killed in a specific phase \(before pipe is closed\), daemon of the pipe is not closed in time. In this case, the sh/bash process does not exit even without **-ti**. As a result, an exception occurs. You need to manually clear the container. - - After being restarted, daemon takes over the original container stream. Containers without the **-ti** parameter may not be able to process the stream because these containers do not have streams to be taken over in normal cases. 
In actual services, sh/bash without the **-ti** parameter does not take effect and is seldom used. To avoid this problem, the **-ti** parameter is used to restrict interactive containers. - -- **Container Storage Volumes** - - If you use the **-v** parameter to mount files on the host to a container when the container is started, the inodes of the files may be changed when you run the **vi** or **sed** command to modify the files on the host or in the container. As a result, files on the host and in the container are not synchronized. Do not mount files in the container in this mode \(or do not use together with the **vi** and **sed** commands\). You can also mount the upper-layer directories of the files to avoid exceptions. The **nocopy** option can be used to prevent original files in the mount point directory of a container from being copied to the source directory of the host when Docker mounts volumes. However, this option can be used only when an anonymous volume is mounted and cannot be used in the bind mount scenario. - -- **Do Not Use Options That May Affect the Host** - - The **--privileged** option enables all permissions for a container. On the container, mounting operations can be performed and directories such as **/proc** and **/sys** can be modified, which may affect the host. Therefore, do not use this option for common containers. - - A host-shared namespace, such as the **--pid host**, **--ipc host**, or **--net host** option, can enable a container to share the namespace with the host, which will also affect the host. Therefore, do not use this option. - -- **Do Not Use the Unstable Kernel Memory Cgroup** - - Kernel memory cgroup on the Linux kernel earlier than 4.0 is still in the experimental phase and runs unstably. Therefore, do not use kernel memory cgroup. - - When the **docker run --kernel-memory** command is executed, the following alarm is generated: - - ``` - WARNING: You specified a kernel memory limit on a kernel older than 4.0. 
Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable. - ``` - -- **blkio-weight Parameter Is Unavailable in the Kernel That Supports blkio Precise Control** - - **--blkio-weight-device** can implement more accurate blkio control in a container. The control requires a specified disk device, which can be implemented through the **--blkio-weight-device** parameter of Docker. In this kernel, Docker does not provide the **--blkio-weight** mode to limit the container blkio. If you use this parameter to create a container, the following error is reported: - - ``` - docker: Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:398: container init caused \"process_linux.go:369: setting cgroup config for ready process caused \\\"blkio.weight not supported, use weight_device instead\\\"\"" - ``` - -- **Using --blkio-weight-device in CFQ Scheduling Policy** - - The **--blkio-weight-device** parameter works only when the disk works in the Completely Fair Queuing \(CFQ\) policy. - - You can view the scheduler file \(**/sys/block/**_disk_**/queue/scheduler**\) to obtain the policies supported by the disk and the current policy. For example, you can run the following command to view **sda**. - - ``` - # cat /sys/block/sda/queue/scheduler noop [deadline] cfq - ``` - - **sda** supports the following scheduling policies: **noop**, **deadline**, and **cfq**, and the **deadline** policy is being used. You can run the **echo** command to change the policy to **cfq**. - - ``` - # echo cfq > /sys/block/sda/queue/scheduler - ``` - - -- **systemd Usage Restrictions in Basic Container Images** - - When containers created from basic images are used, systemd in basic images is used only for system containers. - - -### Concurrent Performance - -- There is an upper limit for the message buffer in Docker.
If the number of messages exceeds the upper limit, the messages are discarded. Therefore, it is recommended that the number of commands executed concurrently should not exceed 1000. Otherwise, the internal messages in Docker may be lost and the container may fail to be started. -- When containers are concurrently created and restarted, the error message "oci runtime error: container init still running" is occasionally reported. This is because containerd optimizes the performance of the event waiting queue. When a container is stopped, the **runc delete** command is executed to kill the init processes in the container within 1s. If the init processes are not killed within 1s, runC returns this error message. The garbage collection \(GC\) mechanism of containerd reclaims residual resources after **runc delete** is executed at an interval of 10s. Therefore, operations on the container are not affected. If the preceding error occurs, wait for 4 or 5s and restart the container. - -### Security Feature Interpretation - -1. The following describes default permission configuration analysis of Docker. - - In the default configuration of a native Docker, capabilities carried by each default process are as follows: - - ``` - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - ``` - - The default seccomp configuration is a whitelist. If any syscall is not in the whitelist, **SCMP\_ACT\_ERRNO** is returned by default. Different system calls are enabled for different capabilities of Docker. If a capability is not in the whitelist, Docker will not assign it to the container by default. - -2. CAP\_SYS\_MODULE - - CAP\_SYS\_MODULE allows a container to insert the ko module. Adding this capability allows the container to escape or even damage the kernel.
Namespace provides the maximum isolation for a container. In the ko module, you only need to point its namespace to **init\_nsproxy**. - -3. CAP\_SYS\_ADMIN - - The sys\_admin permission provides the following capabilities for a container: - - - For file system: **mount**, **umount**, and **quotactl** - - For namespace setting: **setns**, **unshare**, and **clone new namespace** - - driver ioctl - - For PCI control: **pciconfig\_read**, **pciconfig\_write**, and **pciconfig\_iobase** - - **sethostname** - -4. CAP\_NET\_ADMIN - - CAP\_NET\_ADMIN allows a container to access network interfaces and sniff network traffic. The container can obtain the network traffic of all containers including the host, which greatly damages network isolation. - -5. CAP\_DAC\_READ\_SEARCH - - CAP\_DAC\_READ\_SEARCH calls the open\_by\_handle\_at and name\_to\_handle\_at system calls. If the host is not protected by SELinux, the container can perform brute-force search for the inode number of the file\_handle structure to open any file on the host, which affects the isolation of the file system. - -6. CAP\_SYS\_RAWIO - - CAP\_SYS\_RAWIO allows a container to write I/O ports to the host, which may cause the host kernel to crash. - -7. CAP\_SYS\_PTRACE - - The ptrace permission for a container provides ptrace process debugging in the container. RunC has fixed this vulnerability. However, some tools, such as nsenter and docker-enter, are not protected. In a container, processes executed by these tools can be debugged to obtain resource information \(such as namespace and fd\) brought by these tools. In addition, ptrace can bypass seccomp, greatly increasing attack risks of the kernel. - -8. Docker capability interface: --cap-add all - - --cap-add all grants all permissions to a container, including the dangerous permissions mentioned in this section, which allows the container to escape. - -9. Do not disable the seccomp feature of Docker. 
- - Docker has a default seccomp configuration with a whitelist. **sys\_call** that is not in the whitelist is disabled by seccomp. You can disable the seccomp feature by running **--security-opt 'seccomp:unconfined'**. If seccomp is disabled or the user-defined seccomp configuration is used but the filtering list is incomplete, attack risks of the kernel in the container are increased. - -10. Do not set the **/sys** and **/proc** directories to writable. - - The **/sys** and **/proc** directories contain Linux kernel maintenance parameters and device management interfaces. If the write permission is configured for the directories in a container, the container may escape. - -11. Docker open capability: --CAP\_AUDIT\_CONTROL - - The permission allows a container to control the audit system and run the **AUDIT\_TTY\_GET** and **AUDIT\_TTY\_SET** commands to obtain the TTY execution records \(including the **root** password\) recorded in the audit system. - -12. CAP\_BLOCK\_SUSPEND and CAP\_WAKE\_ALARM - - The permission provides a container the capability to block the system from suspending \(epoll\). - -13. CAP\_IPC\_LOCK - - With this permission, a container can break the max locked memory limit in **ulimit** and use any mlock large memory block to cause DoS attacks. - -14. CAP\_SYS\_LOG - - In a container with this permission, system kernel logs can be read by using dmesg to break through kernel kaslr protection. - -15. CAP\_SYS\_NICE - - In a container with this permission, the scheduling policy and priority of a process can be changed, causing DoS attacks. - -16. CAP\_SYS\_RESOURCE - - With this permission, a container can bypass resource restrictions, such as disk space resource restriction, keymaps quantity restriction, and **pipe-size-max** restriction, causing DoS attacks. - -17. CAP\_SYS\_TIME - - In a container with this capability, the time on the host can be changed. - -18. 
Risk analysis of Docker default capabilities - - The default capabilities of Docker include CAP\_SETUID and CAP\_FSETID. If the host and a container share a directory, the container can set permissions for the binary file in the shared directory. Common users on the host can use this method to elevate privileges. With the CAP\_AUDIT\_WRITE capability, a container can write logs to the host, and the host must be configured with log flooding protection measures. - -19. Docker and host share namespace parameters, such as **--pid**, **--ipc**, and **--uts**. - - This parameter indicates that the container and host share the namespace. The container can attack the host as the namespace of the container is not isolated from that of the host. For example, if you use **--pid** to share PID namespace with the host, the PID on the host can be viewed in the container, and processes on the host can be killed at will. - -20. **--device** is used to map the sensitive directories or devices of the host to the container. - - The Docker management plane provides interfaces for mapping directories or devices on a host to the container, such as **--device** and **-v**. Do not map sensitive directories or devices on the host to the container. - - -## Creating Containers Using hook-spec - -### Principles and Application Scenarios - -Docker supports the extended features of hooks. The execution of hook applications and underlying runC complies with the OCI standards. For details about the standards, visit [https://github.com/opencontainers/runtime-spec/blob/master/config.md\#hooks](https://github.com/opencontainers/runtime-spec/blob/master/config.md#hooks). - -There are three types of hooks: prestart, poststart, and poststop. They are respectively used before applications in the container are started, after the applications are started, and after the applications are stopped.
- -### API Reference - -The **--hook-spec** parameter is added to the **docker run** and **create** commands and is followed by the absolute path of the **spec** file. You can specify the hooks to be added during container startup. These hooks will be automatically appended after the hooks that are dynamically created by Docker \(currently only libnetwork prestart hook\) to execute programs specified by users during the container startup or destruction. - -The structure of **spec** is defined as follows: - -``` -// Hook specifies a command that is run at a particular event in the lifecycle of a container -type Hook struct{ - Path string `json:"path"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` - Timeout *int `json:"timeout,omitempty"` -} -// Hooks for container setup and teardown -type Hooks struct{ - // Prestart is a list of hooks to be run before the container process is executed. - // On Linux, they are run after the container namespaces are created. - Prestart []Hook `json:"prestart,omitempty"` - // Poststart is a list of hooks to be run after the container process is started. - Poststart []Hook `json:"poststart,omitempty"` - // Poststop is a list of hooks to be run after the container process exits. - Poststop []Hook `json:"poststop,omitempty"` -} -``` - -- The **Path**, **Args**, and **Env** parameters are mandatory. -- **Timeout** is optional, while you are advised to set this parameter to a value ranging from 1 to 120. The parameter type is int. Floating point numbers are not allowed. -- The content of the **spec** file must be in JSON format as shown in the preceding example. If the format is incorrect, an error is reported. -- Both **docker run --hook-spec /tmp/hookspec.json **_xxx_, and **docker create --hook-spec /tmp/hookspec.json **_xxx_** && docker start **_xxx_ can be used. - -### Customizing Hooks for a Container - -Take adding a NIC during the startup as an example. 
The content of the **hook spec** file is as follows: - -``` -{ - "prestart": [ - { - "path": "/var/lib/docker/hooks/network-hook", - "args": ["network-hook", "tap0", "myTap"], - "env": [], - "timeout": 5 - } - ], - "poststart":[], - "poststop":[] -} -``` - -Specify prestart hook to add the configuration of a network hook. The path is **/var/lib/docker/hooks/network-hook**. **args** indicates the program parameters. Generally, the first parameter is the program name, and the second parameter is the parameter accepted by the program. For the network-hook program, two parameters are required. One is the name of the NIC on the host, and the other is the name of the NIC in the container. - -   - -- Precautions - 1. The **hook** path must be in the** hooks** folder in the **graph** directory \(**--graph**\) of Docker. Its default value is **/var/lib/docker/hooks**. You can run the **docker info** command to view the root path. - - ``` - [root@localhost ~]# docker info - ... - Docker Root Dir: /var/lib/docker - ... - ``` - - This path may change due to the user's manual configuration and the use of user namespaces \(**daemon --userns-remap**\). After the symbolic link of the path is parsed, the parsed path must start with _Docker Root Dir_**/hooks** \(for example, **/var/lib/docker/hooks**\). Otherwise, an error message is displayed. - - 2. The **hook** path must be an absolute path because daemon cannot properly process a relative path. In addition, an absolute path meets security requirements. - 3. The information output by the hook program to stderr is output to the client and affects the container lifecycle \(for example, the container may fail to be started\). The information output to stdout is ignored. - 4. Do not reversely call Docker instructions in hooks. - 5. The execute permission must have been granted on the configured hook execution file. Otherwise, an error is reported during hook execution. - 6. 
The execution time of the hook operation must be as short as possible. If the prestart period is too long \(more than 2 minutes\), the container startup times out. If the poststop period is too long \(more than 2 minutes\), the container is abnormal. - - The known exceptions are as follows: When the **docker stop** command is executed to stop a container and the clearing operation is performed after 2 minutes, the hook operation is not complete. Therefore, the system waits until the hook operation is complete \(the process holds a lock\). As a result, all operations related to the container stop responding. The operations can be recovered only after the hook operation is complete. In addition, the two-minute timeout processing of the **docker stop** command is an asynchronous process. Therefore, even if the **docker stop** command is successfully executed, the container status is still **up**. The container status is changed to **exited** only after the hook operation is completed. - - - -- Suggestions - 1. You are advised to set the hook timeout threshold to a value less than 5s. - 2. You are advised to configure only one prestart hook, one poststart hook, and one poststop hook for each container. If too many hooks are configured, the container startup may take a long time. - 3. You are advised to identify the dependencies between multiple hooks. If required, you need to adjust the sequence of the hook configuration files according to the dependencies. The execution sequence of hooks is based on the sequence in the configured **spec** file. - - -### Multiple **hook-spec** - -If multiple hook configuration files are available and you need to run multiple hooks, you must manually combine these files into a configuration file and specify the new configuration file by using the **--hook-spec** parameter. Then all hooks can take effect. If multiple **--hook-spec** parameters are configured, only the last one takes effect. 
- -Configuration examples: - -The content of the **hook1.json** file is as follows: - -``` -# cat /var/lib/docker/hooks/hookspec.json -{ - "prestart": [ - { - "path": "/var/lib/docker/hooks/lxcfs-hook", - "args": ["lxcfs-hook", "--log", "/var/log/lxcfs-hook.log"], - "env": [] - } - ], - "poststart":[], - "poststop":[] -} -``` - -The content of the **hook2.json** file is as follows: - -``` -# cat /etc/isulad-tools/hookspec.json -{ - "prestart": [ - { - "path": "/docker-root/hooks/docker-hooks", - "args": ["docker-hooks", "--state", "prestart"], - "env": [] - } - ], - "poststart":[], - "poststop":[ - { - "path": "/docker-root/hooks/docker-hooks", - "args": ["docker-hooks", "--state", "poststop"], - "env": [] - } - ] -} -``` - -The content in JSON format after manual combination is as follows: - -``` -{ - "prestart":[ - { - "path": "/var/lib/docker/hooks/lxcfs-hook", - "args": ["lxcfs-hook", "--log", "/var/log/lxcfs-hook.log"], - "env": [] - }, - { - "path": "/docker-root/hooks/docker-hooks", - "args": ["docker-hooks", "--state", "prestart"], - "env": [] - } - ], - "poststart":[], - "poststop":[ - { - "path": "/docker-root/hooks/docker-hooks", - "args": ["docker-hooks", "--state", "poststop"], - "env": [] - } - ] -} -``` - -Docker daemon reads the binary values of hook in actions such as prestart in the hook configuration files in sequence based on the array sequence and executes the actions. Therefore, you need to identify the dependencies between multiple hooks. If required, you need to adjust the sequence of the hook configuration files according to the dependencies. - -### Customizing Default Hooks for All Containers - -Docker daemon can receive the **--hook-spec** parameter. The semantics of **--hook-spec** is the same as that of **--hook-spec** in **docker create** or **docker run**. You can also add hook configurations to the **/etc/docker/daemon.json** file. 
- -``` -{ - "hook-spec": "/tmp/hookspec.json" -} -``` - -When a container is running, hooks specified in **--hook-spec** defined by daemon are executed first, and then hooks customized for each container are executed. - -## Configuring Health Check During Container Creation - -Docker provides the user-defined health check function for containers. You can configure the **HEALTHCHECK CMD** option in the Dockerfile, or configure the **--health-cmd** option when a container is created so that commands are periodically executed in the container to monitor the health status of the container based on return values. - -### Configuration Methods - -- Add the following configurations to the Dockerfile file: - - ``` - HEALTHCHECK --interval=5m --timeout=3s --health-exit-on-unhealthy=true \ - CMD curl -f http://localhost/ || exit 1 - ``` - - The configurable options are as follows: - - 1. **--interval=DURATION**: interval between two consecutive command executions. The default value is **30s**. After a container is started, the first check is performed after the interval time. - 2. **--timeout=DURATION**: maximum duration for executing a single check command. If the execution times out, the command execution fails. The default value is **30s**. - 3. **--start-period=DURATION**: container initialization period. The default value is **0s**. During the initialization, the health check is also performed, while the health check failure is not counted into the maximum number of retries. However, if the health check is successful during initialization, the container is considered as started. All subsequent consecutive check failures are counted in the maximum number of retries. - 4. **--retries=N**: maximum number of retries for the health check. The default value is **3**. - 5. **--health-exit-on-unhealthy=BOOLEAN**: whether to kill a container when it is unhealthy. The default value is **false**. - 6. **CMD**: This option is mandatory.
If **0** is returned after a command is run in a container, the command execution succeeds. If a value other than **0** is returned, the command execution fails. - - After **HEALTHCHECK** is configured, related configurations are written into the image configurations during image creation. You can run the **docker inspect** command to view the configurations. For example: - - ``` - "Healthcheck": { - "Test": [ - "CMD-SHELL", - "/test.sh" - ] - }, - ``` - - -- Configurations during container creation: - - ``` - docker run -itd --health-cmd "curl -f http://localhost/ || exit 1" --health-interval 5m --health-timeout 3s --health-exit-on-unhealthy centos bash - ``` - - The configurable options are as follows: - - 1. **--health-cmd**: This option is mandatory. If **0** is returned after a command is run in a container, the command execution succeeds. If a value other than **0** is returned, the command execution fails. - 2. **--health-interval**: interval between two consecutive command executions. The default value is **30s**. The upper limit of the value is the maximum value of Int64 \(unit: nanosecond\). - 3. **--health-timeout**: maximum duration for executing a single check command. If the execution times out, the command execution fails. The default value is **30s**. The upper limit of the value is the maximum value of Int64 \(unit: nanosecond\). - 4. **--health-start-period**: container initialization time. The default value is **0s**. The upper limit of the value is the maximum value of Int64 \(unit: nanosecond\). - 5. **--health-retries**: maximum number of retries for the health check. The default value is **3**. The maximum value is the maximum value of Int32. - 6. **--health-exit-on-unhealthy**: specifies whether to kill a container when it is unhealthy. The default value is **false**. - - After the container is started, the **HEALTHCHECK** configurations are written into the container configurations. 
You can run the **docker inspect** command to view the configurations. For example: - - ``` - "Healthcheck": { - "Test": [ - "CMD-SHELL", - "/test.sh" - ] - }, - ``` - - - -### Check Rules - -1. After a container is started, the container status is **health:starting**. -2. After the period specified by **start-period**, the **cmd** command is periodically executed in the container at the interval specified by **interval**. That is, after the command is executed, the command will be executed again after the specified period. -3. If the **cmd** command is successfully executed within the time specified by **timeout** and the return value is **0**, the check is successful. Otherwise, the check fails. If the check is successful, the container status changes to **health:healthy**. -4. If the **cmd** command fails to be executed for the number of times specified by **retries**, the container status changes to **health:unhealthy**, and the container continues the health check. -5. When the container status is **health:unhealthy**, the container status changes to **health:healthy** if a check succeeds. -6. If **--health-exit-on-unhealthy** is set, and the container exits due to reasons other than being killed \(the returned exit code is **137**\), the health check takes effect only after the container is restarted. -7. When the **cmd** command execution is complete or times out, Docker daemon will record the start time, return value, and standard output of the check to the configuration file of the container. A maximum of five latest records can be recorded. In addition, the configuration file of the container stores health check parameters. - -Run the **docker ps** command to view the container status. 
- -``` -[root@bac shm]# docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -7de2228674a2 testimg "bash" About an hour ago Up About an hour (unhealthy) cocky_davinci -``` - -When the container is running, the health check status is written into the container configurations. You can run the **docker inspect** command to view the configurations. - -``` -"Health": { - "Status": "healthy", - "FailingStreak": 0, - "Log": [ - { - "Start": "2018-03-07T07:44:15.481414707-05:00", - "End": "2018-03-07T07:44:15.556908311-05:00", - "ExitCode": 0, - "Output": "" - }, - { - "Start": "2018-03-07T07:44:18.557297462-05:00", - "End": "2018-03-07T07:44:18.63035891-05:00", - "ExitCode": 0, - "Output": "" - }, - ...... -} -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- A maximum of five health check status records can be stored in a container. The last five records are saved. ->- Only one health check configuration item can take effect in a container at a time. The later items configured in the Dockerfile will overwrite the earlier ones. Configurations during container creation will overwrite those in images. ->- In the Dockerfile, you can set **HEALTHCHECK NONE** to cancel the health check configuration in a referenced image. When a container is running, you can set **--no-healthcheck** to cancel the health check configuration in an image. Do not configure the health check and **--no-healthcheck** parameters at the same time during the startup. ->- After a container with configured health check parameters is started, if Docker daemon exits, the health check is not executed. After Docker daemon is restarted, the container health status changes to **starting**. Afterwards, the check rules are the same as above. ->- If health check parameters are set to **0** during container image creation, the default values are used. ->- If health check parameters are set to **0** during container startup, the default values are used. 
- -## Stopping and Deleting a Container - -Run the **docker stop** command to stop the container named **container1**. - -``` -[root@localhost ~]# docker stop container1 -``` - -Or run the **docker kill** command to kill and stop the container. - -``` -[root@localhost ~]# docker kill container1 -``` - -After the container is stopped, run the **docker rm** command to delete the container. - -``` -[root@localhost ~]# docker rm container1 -``` - -Or run the **docker rm -f** command to forcibly delete the container. - -``` -[root@localhost ~]# docker rm -f container1 -``` - -### Precautions - -- Do not run the **docker rm -f **_XXX_ command to delete a container. If you forcibly delete a container, the **docker rm** command ignores errors during the process, which may cause residual metadata of the container. If you delete a container in common mode and an error occurs during the deletion process, the deletion fails and no metadata remains. -- Do not run the **docker kill** command. The **docker kill** command sends related signals to service processes in a container. The outcome depends on the signal processing policies of the service processes in the container, so the signal may not be handled as expected. -- A container in the restarting state may not stop immediately when you run the **docker stop** command. If a container uses the restart rules, when the container is in the restarting state, there is a low probability that the **docker stop** command on the container returns immediately. The container will still be restarted with the impact of the restart rule. -- Do not run the **docker restart** command to restart a container with the **--rm** parameter. When a container with the **--rm** parameter exits, the container is automatically deleted. If the container with the **--rm** parameter is restarted, exceptions may occur.
For example, if both the **--rm** and **-ti** parameters are added when the container is started, the restart operation cannot be performed on the container, otherwise, the container may stop responding and cannot exit. - -### When Using docker stop/restart to Specify -t and t<0, Ensure That Applications in the Container Can Process Stop Signal - -Stop Principle: \(The stop process is called by **Restart**.\) - -1. The SIGTERM \(15\) signal can be sent to a container for the first time. -2. Wait for a period of time \(**t** entered by the user\). -3. If the container process still exists, send the SIGKILL \(9\) signal to forcibly kill the process. - -The meaning of the input parameter **t** \(unit: s\) is as follows: - -- **t** < 0: Wait for graceful stop. This setting is preferred when users are assured that their applications have a proper stop signal processing mechanism. -- **t** = 0: Do not wait and send **kill -9** to the container immediately. -- **t** \> 0: Wait for a specified period and send **kill -9** to the container if the container does not stop within the specified period. - -Therefore, if **t** is set to a value less than 0 \(for example, **t** = **-1**\), ensure that the container application correctly processes the SIGTERM signal. If the container ignores this signal, the container will be suspended when the **docker stop** command is run. - -### Manually Deleting Containers in the Dead State As the Underlying File System May Be Busy - -When Docker deletes a container, it stops related processes of the container, changes the container status to Dead, and then deletes the container rootfs. When the file system or devicemapper is busy, the last step of deleting rootfs fails. Run the **docker ps -a** command. The command output shows that the container is in the Dead state. Containers in the Dead state cannot be started again. Wait until the file system is not busy and run the **docker rm** command again to delete the containers. 
- -### In PID namespace Shared Containers, If Child Container Is in pause State, Parent Container Stops Responding and the docker run Command Cannot Be Executed - -When the **--pid** parameter is used to create the parent and child containers that share PID namespace, if any process in the child container cannot exit \(for example, it is in the D or pause state\) when the **docker stop** command is executed, the **docker stop** command of the parent container is waiting. You need to manually recover the process so that the command can be executed normally. - -In this case, run the **docker inspect** command on the container in the pause state to check whether the parent container corresponding to **PidMode** is the container that requires **docker stop**. For the required container, run the **docker unpause** command to cancel the pause state of the child container. Then, proceed to the next step. - -Generally, the possible cause is that the PID namespace corresponding to the container cannot be destroyed due to residual processes. If the problem persists, use Linux tools to obtain the residual processes and locate the cause of the process exit failure in PID namespace. After the problem is solved, the container can exit. - -- Obtain PID namespace ID in a container. - - ``` - docker inspect --format={{.State.Pid}} CONTAINERID | awk '{print "/proc/"$1"/ns/pid"}' |xargs readlink - ``` - -- Obtain threads in the namespace. - - ``` - ls -l /proc/*/task/*/ns/pid |grep -F PIDNAMESPACE_ID |awk '{print $9}' |awk -F \/ '{print $5}' - ``` - - -## Querying Container Information - -In any case, the container status should not be determined based on whether the **docker** command is successfully returned. 
To view the container status, you are advised to use the following command: - -``` -docker inspect -``` - -## Modification Operations - -### Precautions for Starting Multiple Processes in Container Using docker exec - -When the first **docker exec** command executed in a container is the **bash** command, ensure that all processes started by **exec** are stopped before you run the **exit** command. Otherwise, the device may stop responding when you run the **exit** command. To ensure that the process started by **exec** is still running in the background when the **exit** command is run, add **nohup** when starting the process. - -### Usage Conflict Between docker rename and docker stats _container\_name_ - -If you run the **docker stats **_container\_name_ command to monitor a container in real time, after the container is renamed by using **docker rename**, the name displayed after **docker stats** is executed is the original name instead of the renamed one. - -### Failed to Perform docker rename Operation on Container in restarting State - -When the rename operation is performed on a container in the restarting state, Docker modifies the container network configuration accordingly. The container in the restarting state may not be started and the network does not exist. As a result, the rename operation reports an error indicating that the sandbox does not exist. You are advised to rename only containers that are not in the restarting state. - -### docker cp - -1. When you run **docker cp** to copy files to a container, all operations on the container can be performed only after the **docker cp** command is executed. -2. When a container runs as a non-**root** user, and you run the **docker cp** command to copy a non-**root** file on the host to the container, the permission role of the file in the container changes to **root**. Different from the **cp** command, the **docker cp** command changes UIDs and GIDs of the files copied to the container to **root**. 
- -### docker login - -After the **docker login** command is executed, **user/passwd** encrypted by AES \(256-bit\) is saved in **/root/.docker/config.json**. At the same time, **/root/.docker/aeskey** \(permission 0600\) is generated to decrypt **user/passwd** in **/root/.docker/config.json**. Currently, AES key cannot be updated periodically. You need to manually delete the AES key for updating. After AES key is updated, you need to log in to Docker daemon again to push the AES key no matter whether Docker daemon is restarted. For example: - -``` -root@hello:~/workspace/dockerfile# docker login -Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one. -Username: example Password: -Login Succeeded -root@hello:~/workspace/dockerfile# docker push example/empty -The push refers to a repository [docker.io/example/empty] -547b6288eb33: Layer already exists -latest: digest: sha256:99d4fb4ce6c6f850f3b39f54f8eca0bbd9e92bd326761a61f106a10454b8900b size: 524 -root@hello:~/workspace/dockerfile# rm /root/.docker/aeskey -root@hello:~/workspace/dockerfile# docker push example/empty -WARNING: Error loading config file:/root/.docker/config.json - illegal base64 data at input byte 0 -The push refers to a repository [docker.io/example/empty] -547b6288eb33: Layer already exists -errors: -denied: requested access to the resource is denied -unauthorized: authentication required -root@hello:~/workspace/dockerfile# docker login -Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one. 
-Username: example -Password: -Login Succeeded -root@hello:~/workspace/dockerfile# docker push example/empty -The push refers to a repository [docker.io/example/empty] -547b6288eb33: Layer already exists -latest: digest: sha256:99d4fb4ce6c6f850f3b39f54f8eca0bbd9e92bd326761a61f106a10454b8900b size: 524 -``` - diff --git a/docs/en/docs/Container/container-management-2.md b/docs/en/docs/Container/container-management-2.md deleted file mode 100644 index 1ce5d85ff343c2f1e84c0cc1f0d917d373236146..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container-management-2.md +++ /dev/null @@ -1,1289 +0,0 @@ -# Container Management - -- [Container Management](#container-management-2) - - [attach](#attach-41) - - [commit](#commit) - - [cp](#cp) - - [create](#create) - - [diff](#diff) - - [exec](#exec-42) - - [export](#export) - - [inspect](#inspect) - - [logs](#logs) - - [pause/unpause](#pause-unpause) - - [port](#port) - - [ps](#ps) - - [rename](#rename) - - [restart](#restart) - - [rm](#rm) - - [run](#run) - - [start](#start) - - [stats](#stats) - - [stop](#stop) - - [top](#top) - - [update](#update) - - [wait](#wait) - - -Subcommands supported by the current Docker are classified into the following groups by function: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Function

-

Command

-

Description

-

Host environment

-

version

-

Views the Docker version.

-

info

-

Views the Docker system and host environment information.

-

Container-related information

-

Container lifecycle management

-

create

-

Creates a container using an image.

-

run

-

Creates and runs a container using an image.

-

start

-

Starts a stopped container.

-

stop

-

Stops a running container.

-

restart

-

Restarts a container.

-

wait

-

Waits for a container to stop and prints the exit code.

-

rm

-

Deletes a container.

-

Container process management

-

pause

-

Suspends all processes in a container.

-

unpause

-

Resumes a suspended process in a container.

-

top

-

Views processes in a container.

-

exec

-

Executes a process in containers.

-

Container inspection tool

-

ps

-

Views running containers (without attaching any option).

-

logs

-

Displays the log information of a container.

-

attach

-

Connects standard input and output to a container.

-

inspect

-

Returns the bottom-layer information of a container.

-

port

-

Lists the port mappings between containers and hosts.

-

diff

-

Returns the changes made by the container compared with rootfs in the image.

-

cp

-

Copies files between containers and hosts.

-

export

-

Exports the file system in a container in a .tar package.

-

stats

-

Views the resource usage of a container in real time.

-

Images

-

Generates an image.

-

build

-

Creates an image using a Dockerfile.

-

commit

-

Creates an image based on the container rootfs.

-

import

-

Creates an image using the content in the .tar package as the file system.

-

load

-

Loads an image from the .tar package.

-

Image registry

-

login

-

Logs in to a registry.

-

logout

-

Logs out of a registry.

-

pull

-

Pulls an image from the registry.

-

push

-

Pushes an image to the registry.

-

search

-

Searches for an image in the registry.

-

Image management

-

images

-

Displays images in the system.

-

history

-

Displays the change history of an image.

-

rmi

-

Deletes an image.

-

tag

-

Adds a tag to an image.

-

save

-

Saves an image to a .tar package.

-

Others

-

events

-

Obtains real-time events from the Docker daemon.

-

rename

-

Renames a container.

-
- -Some subcommands have some parameters, such as **docker run**. You can run the **docker **_command _**--help** command to view the help information of the command. For details about the command parameters, see the preceding command parameter description. The following sections describe how to use each command. - - - -## attach - -Syntax: **docker attach \[**_options_**\]** _container_ - -Function: Attaches an option to a running container. - -Parameter description: - -**--no-stdin=false**: Does not attach any STDIN. - -**--sig-proxy=true**: Proxies all signals of the container, except SIGCHLD, SIGKILL, and SIGSTOP. - -Example: - -``` -$ sudo docker attach attach_test -root@2988b8658669:/# ls bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var -``` - -## commit - -Syntax: **docker commit \[**_options_**\] **_container _**\[**_repository\[:tag\]_**\]** - -Function: creates an image from a container. - -Parameter description: - -**-a**, **--author=""**: specifies an author. - -**-m**, **--message=""**: specifies the submitted information. - -**-p**, **--pause=true**: pauses the container during submission. 
- -Example: - -Run the following command to start a container and submit the container as a new image: - -``` -$ sudo docker commit test busybox:test -sha256:be4672959e8bd8a4291fbdd9e99be932912fe80b062fba3c9b16ee83720c33e1 - -$ sudo docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox latest e02e811dd08f 2 years ago 1.09MB -``` - -   - -## cp - -Syntax: **docker cp \[**_options_**\] **_container_**:**_src\_path_ _dest\_path_**|-** - -**docker cp \[**_options_**\]** _src\_path_**|-** _container_**:**_dest\_path_ - -Function: Copies a file or folder from a path in a container to a path on the host or copies a file or folder from the host to the container: - -Precautions: The **docker cp** command does not support the copy of files in virtual file systems such as **/proc**, **/sys**, **/dev**, and **/tmp** in the container and files in the file systems mounted by users in the container. - -Parameter description: - -**-a**, **--archive**: Sets the owner of the file copied to the container to the **container** user \(**--user**\). - -**-L**, **--follow-link**: Parses and traces the symbolic link of a file. - -Example: - -Run the following command to copy the **/test** directory in the registry container to the **/home/**_aaa_ directory on the host: - -``` -$ sudo docker cp registry:/test /home/aaa -``` - -## create - -Syntax: **docker create \[**_options_**\]** _image_ **\[**_command_**\] \[**_arg_**...\]** - -Function: Creates a container using an image file and return the ID of the container. After the container is created, run the **docker start** command to start the container. _options_ are used to configure the container during container creation. Some parameters will overwrite the container configuration in the image file. _command_ indicates the command to be executed during container startup. 
- -Parameter description: - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

-a --attach=[]

-

Attaches the console to the STDIN, STDOUT, and STDERR of the process in the container.

-

--name=""

-

Name of a container.

-

--add-host=[host:ip]

-

Adds a mapping between the host name and IP address to the /etc/hosts in the container.

-

For example, --add-host=test:10.10.10.10.

-

--annotation

-

Sets annotations for the container. For example, set the native.umask parameter.

-
--annotation native.umask=normal #The umask value of the started container is 0022.
---annotation native.umask=secure #The umask value of the started container is 0027.
-

If this parameter is not set, the umask configuration in dockerd is used.

-

--blkio-weight

-

Relative weight of blockio, which ranges from 10 to 1000.

-

--blkio-weight-device=[]

-

Blockio weight, which configures the relative weight.

-

-c, --cpu-shares=0

-

Relative weight of the host CPU obtained by the container. This parameter can be used to obtain a higher priority. By default, all containers obtain the same CPU priority.

-

--cap-add=[]

-

Adds Linux functions.

-

--cap-drop=[]

-

Clears Linux functions.

-

--cgroup-parent

-

cgroup parent directory for the container.

-

--cidfile=""

-

Writes the container ID to a specified file.

-

For example: --cidfile=/home/cidfile-test writes the container ID to the /home/cidfile-test file.

-

--cpu-period

-

CPU CFS period.

-

The default value is 100 ms. Generally, --cpu-period and --cpu-quota are used together. For example, --cpu-period=50000 --cpu-quota=25000 indicates that if there is one CPU, the container can obtain 50% of the CPU every 50 ms.

-

--cpus=0.5 has the same effect.

-

--cpu-quota

-

CPU CFS quota. The default value is 0, indicating that there is no restriction on the quota.

-

--cpuset-cpus

-

Number of CPUs (0-3, 0, 1) that can be used by processes in the container. By default, there is no restriction on this parameter.

-

--cpuset-mems

-

Memory nodes (0-3, 0, 1) for running processes in the container. This parameter is valid only for the NUMA system.

-

--device=[]

-

Adds the host device to a container, for example, --device=/dev/sdc:/dev/xvdc:rwm.

-

--dns=[]

-

Forcibly enables the container to use the specified DNS server. For example, --dns=114.114.xxx.xxx indicates that nameserver 114.114.xxx.xxx is written to /etc/resolv.conf of the created container and the original content is overwritten.

-

--dns-opt=[]

-

DNS options.

-

--dns-search=[]

-

Forcibly sets the DNS search domain names used by a container.

-

-e, --env=[]

-

Sets environment variable for the container.

-

--env=[KERNEL_MODULES=]:

-

Inserts a specified module into a container. Currently, only the modules on the host can be inserted. After the container is deleted, the modules still reside on the host, and the --hook-spec option must be configured for the container. The following are valid parameter formats:

-

KERNEL_MODULES=

-

KERNEL_MODULES=a

-

KERNEL_MODULES=a,b

-

KERNEL_MODULES=a,b,

-

--entrypoint=""

-

Overwrites the original entrypoint in the image. The entrypoint is used to set the command executed when the container is started.

-

--env-file=[]

-

Reads environment variables from a file. Multiple environment variables are separated by lines in the file. For example: --env-file=/home/test/env indicates multiple environment variables are stored in the env file.

-

--expose=[]

-

Enables an internal port of a container. The -P option described in the following section maps the enabled port to a port on the host.

-

--group-add=[]

-

Adds a specified container to an additional group.

-

-h, --hostname=""

-

Host name.

-

--health-cmd

-

Container health check command.

-

--health-interval

-

Interval between two consecutive command executions. The default value is 30s.

-

--health-timeout

-

Maximum duration for executing a single check command. If the execution times out, the command fails to be executed. The default value is 30s.

-

--health-start-period

-

Interval between the time when the container is started and the time when the first health check is performed. The default value is 0s.

-

--health-retries

-

Maximum number of retries after a health check fails. The default value is 3.

-

--health-exit-on-unhealthy

-

Specifies whether to stop a container when the container is unhealthy. The default value is false.

-

--host-channel=[]

-

Sets a channel for communication between processes in the container and the host, in host path:container path:rw/ro:size limit format.

-

-i, --interactive=false

-

Enables STDIN even if it is not attached.

-

--ip

-

IPv4 address of a container.

-

--ip6

-

IPv6 address of a container.

-

--ipc

-

IPC namespace of a container.

-

--isolation

-

Container isolation policy.

-

-l, --label=[]

-

Label of a container.

-

--label-file=[]

-

Obtains the label from the file.

-

--link=[]

-

Links to another container. This parameter adds environment variables of the IP address and port number of the linked container to the container and adds a mapping to the /etc/hosts file, for example, --link=name:alias.

-

--log-driver

-

Log driver of a container.

-

--log-opt=[]

-

Log driver option.

-

-m, --memory=""

-

Memory limit of a container. The format is numberoptional unit, and available units are b, k, m, and g. The minimum value of this parameter is 4m.

-

--mac-address

-

MAC address of a container, for example, 92:d0:c6:0a:xx:xx.

-

--memory-reservation

-

Container memory limit. The default value is the same as that of --memory. --memory is a hard limit, and --memory-reservation is a soft limit. When the memory usage exceeds the preset value, the memory usage is dynamically adjusted (the system attempts to reduce the memory usage to a value less than the preset value when reclaiming the memory). However, the memory usage may exceed the preset value. Generally, this parameter can be used together with --memory. The value must be less than the preset value of --memory.

-

--memory-swap

-

Total usage of the common memory and swap partition. -1 indicates no restriction is set on the usage. If this parameter is not set, the swap partition size is twice the value of --memory. That is, the swap partition can use the same amount of memory as --memory.

-

--memory-swappiness=-1

-

Tendency of the container to use the swap memory. The value ranges from 0 to 100, in percentage.

-

--net="bridge"

-

Network mode of the container. Docker 1.3.0 has the following network modes: bridge, host, none, and container:name|id. The default value is bridge.

-
  • bridge: Creates a network stack on the bridge when the Docker daemon is started.
  • host: Uses the network stack of the host in the container.
  • none: Does not use networks.
  • container:name|id: Reuses the network stack of another container.
-

--no-healthcheck

-

Does not perform health check for a container.

-

--oom-kill-disable

-

Disables the OOM killer. You are advised not to set this parameter if the -m parameter is not set.

-

--oom-score-adj

-

Adjusts the OOM rule of a container. The value ranges from -1000 to 1000.

-

-P, --publish-all=false

-

Maps all enabled ports of a container to host ports. Containers can be accessed through the host ports. You can run the docker port command to view the mapping between container ports and host ports.

-

-p, --publish=[]

-

Maps a port in a container to a port on the host, in IP address:host port:container port | IP address::container port | host port:container port | container port format. If no IP address is configured, the host listens on all NICs. If no host port is configured, the host port is automatically allocated.

-

--pid

-

PID namespace of a container.

-

--privileged=false

-

Grants extra permission to a container. If the --privileged option is used, the container can access all devices on the host.

-

--restart=""

-

Configures restart rule when the container exits. Currently, version 1.3.1 supports the following rules:

-
  • no: indicates that the container is not restarted when it is stopped.
  • on-failure: indicates that the container is restarted when the container exit code is not 0. This rule can be used to add the maximum number of restart times, for example, on-failure:5, indicating that the container can be restarted for a maximum of five times.
  • always: indicates that the container is always restarted regardless of the exit code.
-

--read-only

-

Mounts the root file system of the container in read-only mode.

-

--security-opt=[]

-

Container security rule.

-

--shm-size

-

Size of the /dev/shm device. The default value is 64M.

-

--stop-signal=SIGTERM

-

Container stop signal. The default value is SIGTERM.

-

-t, --tty=false

-

Allocates a pseudo terminal.

-

--tmpfs=[]

-

Mounts the tmpfs directory.

-

-u, --user=""

-

User name or user ID.

-

--ulimit=[]

-

ulimit option.

-

--userns

-

User namespace of a container.

-

-v, --volume=[]

-

Mounts a directory of the host to the container, or create a volume in the container. For example, -v /home/test:/home mounts the /home/test directory of the host to the /home directory of the container, and -v /tmp creates the tmp folder in the root directory of the container, the folder can be shared by other containers using the --volumes-from option. The host directory cannot be mounted to the /proc subdirectory of the container. Otherwise, an error is reported when the container is started.

-

--volume-driver

-

Data volume driver of the container. This parameter is optional.

-

--volumes-from=[]

-

Mounts the volume of another container to the current container to share the volume. For example, -volumes-from container_name mounts the volume of container_name to the current container. -v and --volumes-from=[] are two very important options for data backup and live migration.

-

-w, --workdir=""

-

Specifies the working directory of the container.

-
- -Example: - -Run the following command to create a container named **busybox** and run the **docker start** command to start the container. - -``` -$ sudo docker create -ti --name=busybox busybox /bin/bash -``` - -## diff - -Syntax: **docker diff** _container_ - -Function: Checks the differences between containers and determines the changes have been made compared with the container creation. - -Parameter description: none. - -Example: - -``` -$ sudo docker diff registry -C /root -A /root/.bash_history -A /test -``` - -   - -## exec - -Syntax: **docker exec \[**_options_**\]** _container_ _command_ **\[**_arg..._**\]** - -Function: Runs a command in the container. - -Parameter description: - -**-d** and **--detach=false**: Run in the background. - -**-i** and **--interactive=false**: Keep the STDIN of the container enabled. - -**-t** and **--tty=false**: Allocate a virtual terminal. - -**--privileged**: Executes commands in privilege mode. - -**-u** and **--user**: Specifies the user name or UID. - -Example: - -``` -$ sudo docker exec -ti exec_test ls -bin etc lib media opt root sbin sys tmp var -dev home lib64 mnt proc run srv test usr -``` - -   - -## export - -Syntax: **docker export** _container_ - -Function: Exports the file system content of a container to STDOUT in .tar format. - -Parameter description: none. - -Example: - -Run the following commands to export the contents of the container named **busybox** to the **busybox.tar** package: - -``` -$ sudo docker export busybox > busybox.tar -$ ls -busybox.tar -``` - -   - -## inspect - -Syntax: **docker inspect \[**_options_**\] **_container_**|**_image _**\[**_container_|_image..._**\]** - -Function: Returns the underlying information about a container or image. - -Parameter description: - -**-f** and **--format=""**: Output information in a specified format. - -**-s** and **--size**: Display the total file size of the container when the query type is container. 
- -**--type**: Returns the JSON format of the specified type. - -**-t** and **--time=120**: Timeout interval, in seconds. If the **docker inspect** command fails to be executed within the timeout interval, the system stops waiting and immediately reports an error. The default value is **120**. - -Example: - -1. Run the following command to return information about a container: - - ``` - $ sudo docker inspect busybox_test - [ - { - "Id": "9fbb8649d5a8b6ae106bb0ac7686c40b3cbd67ec2fd1ab03e0c419a70d755577", - "Created": "2019-08-28T07:43:51.27745746Z", - "Path": "bash", - "Args": [], - "State": { - "Status": "running", - "Running": true, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 64177, - "ExitCode": 0, - "Error": "", - "StartedAt": "2019-08-28T07:43:53.021226383Z", - "FinishedAt": "0001-01-01T00:00:00Z" - }, - ...... - ``` - -    - -2. Run the following command to return the specified information of a container in a specified format. The following uses the IP address of the busybox\_test container as an example. - - ``` - $ sudo docker inspect -f {{.NetworkSettings.IPAddress}} busybox_test - 172.17.0.91 - ``` - - -## logs - -Syntax: **docker logs \[**_options_**\]** _container_ - -Function: Captures logs in a container that is in the **running** or **stopped** state. - -Parameter description: - -**-f** and **--follow=false**: Print logs in real time. - -**-t** and **--timestamps=false**: Display the log timestamp. - -**--since**: Displays logs generated after the specified time. - -**--tail="all"**: Sets the number of lines to be displayed. By default, all lines are displayed. - -Example: - -1. 
Run the following command to check the logs of the jaegertracing container where a jaegertracing service runs: - - ``` - $ sudo docker logs jaegertracing - {"level":"info","ts":1566979103.3696961,"caller":"healthcheck/handler.go:99","msg":"Health Check server started","http-port":14269,"status":"unavailable"} - {"level":"info","ts":1566979103.3820567,"caller":"memory/factory.go:55","msg":"Memory storage configuration","configuration":{"MaxTraces":0}} - {"level":"info","ts":1566979103.390773,"caller":"tchannel/builder.go:94","msg":"Enabling service discovery","service":"jaeger-collector"} - {"level":"info","ts":1566979103.3908608,"caller":"peerlistmgr/peer_list_mgr.go:111","msg":"Registering active peer","peer":"127.0.0.1:14267"} - {"level":"info","ts":1566979103.3922884,"caller":"all-in-one/main.go:186","msg":"Starting agent"} - {"level":"info","ts":1566979103.4047635,"caller":"all-in-one/main.go:226","msg":"Starting jaeger-collector TChannel server","port":14267} - {"level":"info","ts":1566979103.404901,"caller":"all-in-one/main.go:236","msg":"Starting jaeger-collector HTTP server","http-port":14268} - {"level":"info","ts":1566979103.4577134,"caller":"all-in-one/main.go:256","msg":"Listening for Zipkin HTTP traffic","zipkin.http-port":9411} - ``` - -    - -2. Add **-f** to the command to output the logs of the jaegertracing container in real time. 
- - ``` - $ sudo docker logs -f jaegertracing - {"level":"info","ts":1566979103.3696961,"caller":"healthcheck/handler.go:99","msg":"Health Check server started","http-port":14269,"status":"unavailable"} - {"level":"info","ts":1566979103.3820567,"caller":"memory/factory.go:55","msg":"Memory storage configuration","configuration":{"MaxTraces":0}} - {"level":"info","ts":1566979103.390773,"caller":"tchannel/builder.go:94","msg":"Enabling service discovery","service":"jaeger-collector"} - {"level":"info","ts":1566979103.3908608,"caller":"peerlistmgr/peer_list_mgr.go:111","msg":"Registering active peer","peer":"127.0.0.1:14267"} - {"level":"info","ts":1566979103.3922884,"caller":"all-in-one/main.go:186","msg":"Starting agent"} - ``` - -    - - -## pause/unpause - -Syntax: **docker pause** _container_ - -**docker unpause** _container_ - -Function: The two commands are used in pairs. The **docker pause** command suspends all processes in a container, and the **docker unpause** command resumes the suspended processes. - -Parameter description: none. - -Example: - -The following uses a container where the docker registry service runs as an example. After the **docker pause** command is executed to pause the process of the container, access of the registry service by running the **curl** command is blocked. You can run the **docker unpause** command to resume the suspended registry service. The registry service can be accessed by running the **curl** command. - -1. Run the following command to start a registry container: - - ``` - $ sudo docker run -d --name pause_test -p 5000:5000 registry - ``` - - Run the **curl** command to access the service. Check whether the status code **200 OK** is returned. - - ``` - $ sudo curl -v 127.0.0.1:5000 - ``` - -    - -2. 
Run the following command to stop the processes in the container: - - ``` - $ sudo docker pause pause_test - ``` - - Run the **curl** command to access the service to check whether it is blocked and wait until the service starts. - -3. Run the following command to resume the processes in the container: - - ``` - $ sudo docker unpause pause_test - ``` - - The cURL access in step 2 is restored and the request status code **200 OK** is returned. - -    - - -## port - -Syntax: **docker port **_container_ **\[**_private\_port\[/proto\]_**\]** - -Function: Lists the port mapping of a container or queries the host port where a specified port resides. - -Parameter description: none. - -Example: - -1. Run the following command to list all port mappings of a container: - - ``` - $ sudo docker port registry - 5000/tcp -> 0.0.0.0:5000 - ``` - -2. Run the following command to query the mapping of a specified container port: - - ``` - $ sudo docker port registry 5000 - 0.0.0.0:5000 - ``` - - -## ps - -Syntax: **docker ps \[**_options_**\]** - -Function: Lists containers in different states based on different parameters. If no parameter is added, all running containers are listed. - -Parameter description: - -**-a** and **--all=false**: Display all containers, including stopped ones. - -**-f** and **--filter=\[\]**: Filter values. The available options are: **exited=**_int_ \(exit code of the container\) **status=**_restarting|running|paused|exited_ \(status code of the container\), for example, **-f status=running**: lists the running containers. - -**-l** and **--latest=false**: List the latest created container. - -**-n=-1**: Lists the latest created _n_ containers. - -**--no-trunc=false**: Displays all 64-bit container IDs. By default, 12-bit container IDs are displayed. - -**-q** and **--quiet=false**: Display the container ID. - -**-s** and **--size=false**: Display the container size. - -Example: - -1. Run the following command to list running containers: - - ``` - $ sudo docker ps - ``` - -2. 
Run the following command to display all containers: - - ``` - $ sudo docker ps -a - ``` - - -## rename - -Syntax: **docker rename OLD\_NAME NEW\_NAME** - -Function: Renames a container. - -Example: - -Run the **docker run** command to create and start a container, run the **docker rename** command to rename the container, and check whether the container name is changed. - -``` -$ sudo docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -b15976967abb busybox:latest "bash" 3 seconds ago Up 2 seconds festive_morse -$ sudo docker rename festive_morse new_name -$ sudo docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -b15976967abb busybox:latest "bash" 34 seconds ago Up 33 seconds new_name -``` - -   - -## restart - -Syntax: **docker restart \[**_options_**\]** _container_ **\[**_container..._**\]** - -Function: Restarts a running container. - -Parameter description: - -**-t** and **--time=10**: Number of seconds to wait for the container to stop before the container is killed. If the container has stopped, restart the container. The default value is **10**. - -Example: - -``` -$ sudo docker restart busybox -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->During the container restart, if a process in the **D** or **Z** state exists in the container, the container may fail to be restarted. In this case, you need to analyze the cause of the **D** or **Z** state of the process in the container. Restart the container after the **D** or **Z** state of the process in the container is released. - -## rm - -Syntax: **docker rm \[**_options_**\] **_container_ **\[**_container..._**\]** - -Function: Deletes one or more containers. - -Parameter description: - -**-f** and **--force=false**: Forcibly delete a running container. - -**-l** and **--link=false**: Remove the specified link and do not remove the underlying container. - -**-v** and **--volumes=false**: Remove the volumes associated with the container. - -Example: - -1. 
Run the following command to delete a stopped container: - - ``` - $ sudo docker rm test - ``` - -2. Run the following command to delete a running container: - - ``` - $ sudo docker rm -f rm_test - ``` - - -## run - -Syntax: **docker run \[**_options_**\] **_image_ **\[**_command_**\] \[**_arg_**...\]** - -Function: Creates a container from a specified image \(if the specified image does not exist, an image is downloaded from the official image registry\), starts the container, and runs the specified command in the container. This command integrates the **docker create**, **docker start**, and **docker exec** commands. - -Parameter description: \(The parameters of this command are the same as those of the **docker create** command. For details, see the parameter description of the **docker create** command. Only the following two parameters are different.\) - -**--rm=false**: Specifies the container to be automatically deleted when it exits. - -**-v**: Mounts a local directory or an anonymous volume to the container. Note: When a local directory is mounted to a container with a SELinux security label, do not add or delete the local directory at the same time. Otherwise, the security label may not take effect. - -**--sig-proxy=true**: Receives proxy of the process signal. SIGCHLD, SIGSTOP, and SIGKILL do not use the proxy. - -Example: - -Run the busybox image to start a container and run the **/bin/sh** command after the container is started: - -``` -$ sudo docker run -ti busybox /bin/sh -``` - -## start - -Syntax: **docker start \[**_options_**\]** _container_ **\[**_container_**...\]** - -Function: Starts one or more containers that are not running. - -Parameter description: - -**-a** and **--attach=false**: Attach the standard output and error output of a container to STDOUT and STDERR of the host. - -**-i** and **--interactive=false**: Attach the standard input of the container to the STDIN of the host. 
- -Example: - -Run the following command to start a container named **busybox** and add the **-i -a** to the command to add standard input and output. After the container is started, directly enter the container. You can exist the container by entering **exit**. - -If **-i -a** is not added to the command when the container is started, the container is started in the background. - -``` -$ sudo docker start -i -a busybox -``` - -## stats - -Syntax: **docker stats \[**_options_**\] \[**_container_**...\]** - -Function: Continuously monitors and displays the resource usage of a specified container. \(If no container is specified, the resource usage of all containers is displayed by default.\) - -Parameter description: - -**-a**, and **--all**: Display information about all containers. By default, only running containers are displayed. - -**--no-stream**: Displays only the first result and does not continuously monitor the result. - -Example: - -Run the **docker run** command to start and create a container, and run the **docker stats** command to display the resource usage of the container: - -``` -$ sudo docker stats -CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS -2e242bcdd682 jaeger 0.00% 77.08MiB / 125.8GiB 0.06% 42B / 1.23kB 97.9MB / 0B 38 -02a06be42b2c relaxed_chandrasekhar 0.01% 8.609MiB / 125.8GiB 0.01% 0B / 0B 0B / 0B 10 -deb9e49fdef1 hardcore_montalcini 0.01% 12.79MiB / 125.8GiB 0.01% 0B / 0B 0B / 0B 9 -``` - -   - -## stop - -Syntax: **docker stop \[**_options_**\]** _container_ **\[**_container_**...\]** - -Function: Sends a SIGTERM signal to a container and then sends a SIGKILL signal to stop the container after a certain period. - -Parameter description: - -**-t** and **--time=10**: Number of seconds that the system waits for the container to exit before the container is killed. The default value is **10**. 
- -Example: - -``` -$ sudo docker stop -t=15 busybox -``` - -## top - -Syntax: **docker top** _container_ **\[**_ps options_**\]** - -Function: Displays the processes running in a container. - -Parameter description: none. - -Example: - -Run the top\_test container and run the **top** command in the container. - -``` -$ sudo docker top top_test -UID PID PPID C STIME TTY TIME CMD -root 70045 70028 0 15:52 pts/0 00:00:00 bash -``` - -The value of **PID** is the PID of the process in the container on the host. - -## update - -Syntax: **docker update \[**_options_**\]** _container_ **\[**_container_**...\]** - -Function: Hot changes one or more container configurations. - -Parameter description: - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--accel=[]

-

Configures one or more container accelerators.

-

--blkio-weight

-

Relative weight of the container blockio. The value ranges from 10 to 1000.

-

--cpu-shares

-

Relative weight of the host CPU obtained by the container. This parameter can be used to obtain a higher priority. By default, all containers obtain the same CPU priority.

-

--cpu-period

-

CPU CFS period.

-

The default value is 100 ms. Generally, --cpu-period and --cpu-quota are used together. For example, --cpu-period=50000 --cpu-quota=25000 indicates that if there is one CPU, the container can obtain 50% of the CPU every 50 ms.

-

--cpu-quota

-

CPU CFS quota. The default value is 0, indicating that there is no restriction on the quota.

-

--cpuset-cpus

-

Number of CPUs (0-3, 0, 1) that can be used by processes in the container. By default, there is no restriction on this parameter.

-

--cpuset-mems

-

Memory nodes (0-3, 0, 1) for running processes in the container. This parameter is valid only for the NUMA system.

-

--kernel-memory=""

-

Kernel memory limit of a container. The format is a number with an optional unit, and available units are b, k, m, and g.

-

-m, --memory=""

-

Memory limit of a container. The format is a number with an optional unit, and available units are b, k, m, and g. The minimum value of this parameter is 4m.

-

--memory-reservation

-

Container memory limit. The default value is the same as that of --memory. --memory is a hard limit, and --memory-reservation is a soft limit. When the memory usage exceeds the preset value, the memory usage is dynamically adjusted (the system attempts to reduce the memory usage to a value less than the preset value when reclaiming the memory). However, the memory usage may exceed the preset value. Generally, this parameter can be used together with --memory. The value must be less than the preset value of --memory.

-

--memory-swap

-

Total usage of the common memory and swap partition. -1 indicates no restriction is set on the usage. If this parameter is not set, the swap partition size is twice the value of --memory. That is, the swap partition can use the same amount of memory as --memory.

-

--restart=""

-

Configures restart rule when the container exits. Currently, version 1.3.1 supports the following rules:

-
  • no: indicates that the container is not restarted when it is stopped.
  • on-failure: indicates that the container is restarted when the container exit code is not 0. This rule can also specify the maximum number of restarts, for example, on-failure:5, indicating that the container can be restarted a maximum of five times.
  • always: indicates that the container is always restarted regardless of the exit code.
-

--help

-

Help information.

-
- -Example: - -Run the following command to change the CPU and memory configurations of the container named **busybox**, including changing the relative weight of the host CPU obtained by the container to **512**, the CPU cores that can be run by processes in the container to **0,1,2,3**, and the memory limit for running the container to **512 m**. - -``` -$ sudo docker update --cpu-shares 512 --cpuset-cpus=0,3 --memory 512m ubuntu -``` - -## wait - -Syntax: **docker wait** _container_ **\[**_container..._**\]** - -Function: Waits for a container to stop and print the exit code of the container: - -Parameter description: none. - -Example: - -Run the following command to start a container named **busybox**: - -``` -$ sudo docker start -i -a busybox -``` - -Run the **docker wait** command: - -``` -$ sudo docker wait busybox -0 -``` - -Wait until the busybox container exits. After the busybox container exits, the exit code **0** is displayed. - diff --git a/docs/en/docs/Container/container-management.md b/docs/en/docs/Container/container-management.md deleted file mode 100644 index ef6dc65d475f8680e1908be5eb7c62a718944d89..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container-management.md +++ /dev/null @@ -1,1967 +0,0 @@ -# Container Management - -- [Container Management](#container-management) - - [Creating a Container](#creating-a-container) - - [Starting a Container](#starting-a-container) - - [Running a Container](#running-a-container) - - [Stopping a Container](#stopping-a-container) - - [Forcibly Stopping a Container](#forcibly-stopping-a-container) - - [Removing a Container](#removing-a-container) - - [Attaching to a Container](#attaching-to-a-container) - - [Renaming a Container](#renaming-a-container) - - [Executing a Command in a Running Container](#executing-a-command-in-a-running-container) - - [Querying Information About a Single Container](#querying-information-about-a-single-container) - - [Querying Information About All 
Containers](#querying-information-about-all-containers) - - [Restarting a Container](#restarting-a-container) - - [Waiting for a Container to Exit](#waiting-for-a-container-to-exit) - - [Viewing Process Information in a Container](#viewing-process-information-in-a-container) - - [Displaying Resource Usage Statistics of a Container](#displaying-resource-usage-statistics-of-a-container) - - [Obtaining Container Logs](#obtaining-container-logs) - - [Copying Data Between a Container and a Host](#copying-data-between-a-container-and-a-host) - - [Pausing a Container](#pausing-a-container) - - [Resuming a Container](#resuming-a-container) - - [Obtaining Event Messages from the Server in Real Time](#obtaining-event-messages-from-the-server-in-real-time) - - - - -## Creating a Container - -### Description - -To create a container, run the **isula create** command. The container engine will use the specified container image to create a read/write layer, or use the specified local rootfs as the running environment of the container. After the creation is complete, the container ID is output as standard output. You can run the **isula start** command to start the container. The new container is in the **inited** state. - -### Usage - -``` -isula create [OPTIONS] IMAGE [COMMAND] [ARG...] -``` - -### Parameters - -The following table lists the parameters supported by the **create** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

create

-

  

-

--annotation

-

Sets annotations for the container. For example, set the native.umask parameter.

-
--annotation native.umask=normal #The umask value of the started container is 0022.
---annotation native.umask=secure #The umask value of the started container is 0027.
-

If this parameter is not set, the umask configuration in iSulad is used.

-

--cap-drop

-

Deletes Linux permissions.

-

--cgroup-parent

-

Specifies the cgroup parent path of the container.

-

--cpuset-cpus

-

Allowed CPUs (for example, 0-3, 0, 1).

-

--cpu-shares

-

CPU share (relative weight).

-

--cpu-quota

-

Limits the CPU CFS quota.

-

--device=[]

-

Adds a device to the container.

-

--dns

-

Adds a DNS server.

-

--dns-opt

-

Adds DNS options.

-

--dns-search

-

Sets the search domain of a container.

-

-e, --env

-

Sets environment variables.

-

--env-file

-

Configures environment variables using a file.

-

--entrypoint

-

Entry point to run when the container is started.

-

--external-rootfs=PATH

-

Specifies a rootfs (a folder or block device) that is not managed by iSulad for the container.

-

--files-limit

-

Limits the number of file handles that can be opened in a container. The value -1 indicates no limit.

-

--group-add=[]

-

Adds additional user groups to the container.

-

--help

-

Displays help information.

-

--health-cmd

-

Command executed in a container.

-

--health-exit-on-unhealthy

-

Determines whether to kill a container when the container is detected unhealthy.

-

--health-interval

-

Interval between two consecutive command executions.

-

--health-retries

-

Maximum number of health check retries.

-

--health-start-period

-

Container initialization interval.

-

--health-timeout

-

Maximum time for executing a single check command.

-

--hook-spec

-

Hook configuration file.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-h, --hostname

-

Container host name.

-

-i, --interactive

-

Enables the standard input of the container even if it is not connected to the standard input of the container.

-

--hugetlb-limit=[]

-

Limits the size of huge-page files, for example, --hugetlb-limit 2MB:32MB.

-

--log-opt=[]

-

Log driver option. By default, the container serial port log function is disabled. You can run the --log-opt disable-log=false command to enable it.

-

-l,--label

-

Sets a label for a container.

-

--label-file

-

Sets container labels using files.

-

-m, --memory

-

Memory limit.

-

--memory-reservation

-

Sets the container memory limit. The default value is the same as that of --memory. --memory is a hard limit, and --memory-reservation is a soft limit. When the memory usage exceeds the preset value, the memory usage is dynamically adjusted (the system attempts to reduce the memory usage to a value less than the preset value when reclaiming the memory). However, the memory usage may exceed the preset value. Generally, this parameter can be used together with --memory. The value must be less than the preset value of --memory. The minimum value is 4 MB.

-

--memory-swap

-

Memory swap space, which should be a positive integer. The value -1 indicates no limit.

-

--memory-swappiness

-

The value of swappiness is an integer ranging from 0 to 100. The smaller the value is, the less the swap partition is used and the more the memory is used in the Linux system. The larger the value is, the more the swap space is used by the kernel. The default value is -1, indicating that the default system value is used.

-

--mount

-

Mounts the host directory, volume, or file system to the container.

-

--no-healthcheck

-

Disables the health check configuration.

-

--name=NAME

-

Container name.

-

--net=none

-

Connects a container to a network.

-

--pids-limit

-

Limits the number of processes that can be executed in the container. The value -1 indicates no limit.

-

--privileged

-

Grants container extension privileges.

-

-R, --runtime

-

Container runtime. The parameter value can be lcr, which is case insensitive. Therefore, LCR and lcr are equivalent.

-

--read-only

-

Sets the rootfs of a container to read-only.

-

--restart

-

Restart policy upon container exit.

-

For a system container, --restart on-reboot is supported.

-

--storage-opt

-

Configures the storage driver option for a container.

-

-t, --tty

-

Allocates a pseudo terminal.

-

--ulimit

-

Sets the ulimit for a container.

-

-u, --user

-

User name or UID, in the format of [<name|uid>][:<group|gid>].

-

-v, --volume=[]

-

Mounts a volume.

-

--volumes-from=[]

-

Uses the mounting configuration of the specified container.

-
- -### Constraints - -- When the **--user** or **--group-add** parameter is used to verify the user or group during container startup, if the container uses an OCI image, the verification is performed in the **etc/passwd** and **etc/group** files of the actual rootfs of the image. If a folder or block device is used as the rootfs of the container, the **etc/passwd** and **etc/group** files in the host are verified. The rootfs ignores mounting parameters such as **-v** and **--mount**. That is, when these parameters are used to attempt to overwrite the **etc/passwd** and **etc/group** files, the parameters do not take effect during the search and take effect only when the container is started. The generated configuration is saved in the **iSulad root directory/engine/container ID/start\_generate\_config.json** file. The file format is as follows: - - ``` - { - "uid": 0, - "gid": 8, - "additionalGids": [ - 1234, - 8 - ] - } - ``` - - -### Example - -Create a container. - -``` -$ isula create busybox -fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -$ isula ps -a -STATUS PID IMAGE COMMAND EXIT_CODE RESTART_COUNT STARTAT FINISHAT RUNTIME ID NAMES inited - busybox "sh" 0 0 - - lcr fd7376591a9c fd7376591a9c4521... -``` - -## Starting a Container - -### Description - -To start one or more containers, run the **isula start** command. - -### Usage - -``` -isula start [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **start** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

start

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-R, --runtime

-

Container runtime. The parameter value can be lcr, which is case insensitive. Therefore, LCR and lcr are equivalent.

-
- -### Example - -Start a new container. - -``` -$ isula start fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -``` - -## Running a Container - -### Description - -To create and start a container, run the **isula run** command. You can use a specified container image to create a container read/write layer and prepare for running the specified command. After the container is created, run the specified command to start the container. The **run** command is equivalent to creating and starting a container. - -### Usage - -``` -isula run [OPTIONS] ROOTFS|IMAGE [COMMAND] [ARG...] -``` - -### Parameters - -The following table lists the parameters supported by the **run** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

run

-

--annotation

-

Sets annotations for the container. For example, set the native.umask option.

-
--annotation native.umask=normal #The umask value of the started container is 0022.
---annotation native.umask=secure #The umask value of the started container is 0027.
-

If this parameter is not set, the umask configuration in iSulad is used.

-

--cap-add

-

Adds Linux functions.

-

--cap-drop

-

Deletes Linux functions.

-

--cgroup-parent

-

Specifies the cgroup parent path of the container.

-

--cpuset-cpus

-

Allowed CPUs (for example, 0-3, 0, 1).

-

--cpu-shares

-

CPU share (relative weight).

-

--cpu-quota

-

Limits the CPU CFS quota.

-

-d, --detach

-

Runs the container in the background and displays the container ID.

-

--device=[]

-

Adds a device to the container.

-

--dns

-

Adds a DNS server.

-

--dns-opt

-

Adds DNS options.

-

--dns-search

-

Sets the search domain of a container.

-

-e, --env

-

Sets environment variables.

-

--env-file

-

Configures environment variables using a file.

-

--entrypoint

-

Entry point to run when the container is started.

-

--external-rootfs=PATH

-

Specifies a rootfs (a folder or block device) that is not managed by iSulad for the container.

-

--files-limit

-

Limits the number of file handles that can be opened in the container. The value -1 indicates no limit.

-

--group-add=[]

-

Adds additional user groups to the container.

-

--help

-

Displays help information.

-

--health-cmd

-

Command executed in a container.

-

--health-exit-on-unhealthy

-

Determines whether to kill a container when the container is detected unhealthy.

-

--health-interval

-

Interval between two consecutive command executions.

-

--health-retries

-

Maximum number of health check retries.

-

--health-start-period

-

Container initialization interval.

-

--health-timeout

-

Maximum time for executing a single check command.

-

--hook-spec

-

Hook configuration file.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-h, --hostname

-

Container host name.

-

--hugetlb-limit=[]

-

Limits the size of huge-page files, for example, --hugetlb-limit 2MB:32MB.

-

-i, --interactive

-

Enables the standard input of the container even if it is not connected to the standard input of the container.

-

--log-opt=[]

-

Log driver option. By default, the container serial port log function is disabled. You can run the --log-opt disable-log=false command to enable it.

-

-m, --memory

-

Memory limit.

-

--memory-reservation

-

Sets the container memory limit. The default value is the same as that of --memory. --memory is a hard limit, and --memory-reservation is a soft limit. When the memory usage exceeds the preset value, the memory usage is dynamically adjusted (the system attempts to reduce the memory usage to a value less than the preset value when reclaiming the memory). However, the memory usage may exceed the preset value. Generally, this parameter can be used together with --memory. The value must be less than the preset value of --memory. The minimum value is 4 MB.

-

--memory-swap

-

Memory swap space, which should be a positive integer. The value -1 indicates no limit.

-

--memory-swappiness

-

The value of swappiness is an integer ranging from 0 to 100. The smaller the value is, the less the swap partition is used and the more the memory is used in the Linux system. The larger the value is, the more the swap space is used by the kernel. The default value is -1, indicating that the default system value is used.

-

--mount

-

Mounts a host directory to a container.

-

--no-healthcheck

-

Disables the health check configuration.

-

--name=NAME

-

Container name.

-

--net=none

-

Connects a container to a network.

-

--pids-limit

-

Limits the number of processes that can be executed in the container. The value -1 indicates no limit.

-

--privileged

-

Grants container extension privileges.

-

-R, --runtime

-

Container runtime. The parameter value can be lcr, which is case insensitive. Therefore, LCR and lcr are equivalent.

-

--read-only

-

Sets the rootfs of a container to read-only.

-

--restart

-

Restart policy upon container exit.

-

For a system container, --restart on-reboot is supported.

-

--rm

-

Automatically clears a container upon exit.

-

--storage-opt

-

Configures the storage driver option for a container.

-

-t, --tty

-

Allocates a pseudo terminal.

-

--ulimit

-

Sets the ulimit for a container.

-

-u, --user

-

User name or UID, in the format of [<name|uid>][:<group|gid>].

-

-v, --volume=[]

-

Mounts a volume.

-
- -### Constraints - -- When the parent process of a container exits, the corresponding container automatically exits. -- When a common container is created, the parent process cannot be initiated because the permission of common containers is insufficient. As a result, the container does not respond when you run the **attach** command though it is created successfully. -- If **--net** is not specified when the container is running, the default host name is **localhost**. -- If the **--files-limit** parameter is to transfer a small value, for example, 1, when the container is started, iSulad creates a cgroup, sets the files.limit value, and writes the PID of the container process to the **cgroup.procs** file of the cgroup. At this time, the container process has opened more than one handle. As a result, a write error is reported, and the container fails to be started. -- If both** --mount** and **--volume** exist and their destination paths conflict, **--mount** will be run after **--volume** \(that is, the mount point in **--volume** will be overwritten\). - - Note: The value of the **type** parameter of lightweight containers can be **bind** or **squashfs**. When **type** is set to **squashfs**, **src** is the image path. The value of the **type** parameter of the native Docker can be **bind**, **volume**, and **tmpfs**. - -- The restart policy does not support **unless-stopped**. -- The values returned for Docker and lightweight containers are 127 and 125 respectively in the following three scenarios: - - The host device specified by **--device** does not exist. - - The hook JSON file specified by **--hook-spec** does not exist. - - The entry point specified by **--entrypoint** does not exist. - -- When the **--volume** parameter is used, /dev/ptmx will be deleted and recreated during container startup. Therefore, do not mount the **/dev** directory to that of the container. Use **--device** to mount the devices in **/dev** of the container. 
-- Do not use the echo option to input data to the standard input of the **run** command. Otherwise, the client will be suspended. The echo value should be directly transferred to the container as a command line parameter. - - ``` - [root@localhost ~]# echo ls | isula run -i busybox /bin/sh - - - ^C - [root@localhost ~]# - ``` - - The client is suspended when the preceding command is executed because the preceding command is equivalent to input **ls** to **stdin**. Then EOF is read and the client does not send data and waits for the server to exit. However, the server cannot determine whether the client needs to continue sending data. As a result, the server is suspended in reading data, and both parties are suspended. - - The correct execution method is as follows: - - ``` - [root@localhost ~]# isula run -i busybox ls - bin - dev - etc - home - proc - root - sys - tmp - usr - var - [root@localhost ~]# - ``` - -- If the root directory \(/\) of the host is used as the file system of the container, the following situations may occur during the mounting: - - **Table 2** Mounting scenarios - - - - - - - - - - - - - -

Host Path (Source)

-

Container Path (Destination)

-

/home/test1

-

/mnt/

-

/home/test2

-

/mnt/abc

-
- - >![](./public_sys-resources/icon-notice.gif) **NOTICE:** - >Scenario 1: Mount **/home/test1** and then **/home/test2**. In this case, the content in **/home/test1** overwrites the content in **/mnt**. As a result, the **abc** directory does not exist in **/mnt**, and mounting** /home/test2** to **/mnt/abc** fails. - >Scenario 2: Mount **/home/test2** and then **/home/test1**. In this case, the content of **/mnt** is replaced with the content of **/home/test1** during the second mounting. In this way, the content mounted during the first mounting from **/home/test2** to **/mnt/abc** is overwritten. - >The first scenario is not supported. For the second scenario, users need to understand the risk of data access failures. - - >![](./public_sys-resources/icon-notice.gif) **NOTICE:** - >- In high concurrency scenarios \(200 containers are concurrently started\), the memory management mechanism of Glibc may cause memory holes and large virtual memory \(for example, 10 GB\). This problem is caused by the restriction of the Glibc memory management mechanism in the high concurrency scenario, but not by memory leakage. Therefore, the memory consumption does not increase infinitely. You can set the **MALLOC\_ARENA\_MAX** environment variable to reduce the virtual memory and increase the probability of reducing the physical memory. However, this environment variable will cause the iSulad concurrency performance to deteriorate. Set this environment variable based on the site requirements. - > ``` - > To balance performance and memory usage, set MALLOC_ARENA_MAX to 4. (The iSulad performance deterioration on the ARM64 server is controlled by less than 10%.) - > Configuration method: - > 1. To manually start iSulad, run the export MALLOC_ARENA_MAX=4 command and then start the iSulad. - > 2. If systemd manages iSulad, you can modify the /etc/sysconfig/iSulad file by adding MALLOC_ARENA_MAX=4. - > ``` - - -### Example - -Run a new container. 
- -``` -$ isula run -itd busybox -9c2c13b6c35f132f49fb7ffad24f9e673a07b7fe9918f97c0591f0d7014c713b -``` - -## Stopping a Container - -### Description - -To stop a container, run the **isula stop** command. The SIGTERM signal is sent to the first process in the container. If the container is not stopped within the specified time \(10s by default\), the SIGKILL signal is sent. - -### Usage - -``` -isula stop [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **stop** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

stop

-

-f, --force

-

Forcibly stops a running container.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-t, --time

-

Time for graceful stop. If the time exceeds the value of this parameter, the container is forcibly stopped.

-
- -### Constraints - -- If the **t** parameter is specified and the value of **t** is less than 0, ensure that the application in the container can process the stop signal. - - Principle of the Stop command: Send the SIGTERM signal to the container, and then wait for a period of time \(**t** entered by the user\). If the container is still running after the period of time, the SIGKILL signal is sent to forcibly kill the container. - - -- The meaning of the input parameter **t** is as follows: - - **t** < 0: Wait for graceful stop. This setting is preferred when users are assured that their applications have a proper stop signal processing mechanism. - - **t** = 0: Do not wait and send **kill -9** to the container immediately. - - **t** \> 0: Wait for a specified period and send **kill -9** to the container if the container does not stop within the specified period. - - Therefore, if **t** is set to a value less than 0 \(for example, **t** = -1\), ensure that the container application correctly processes the SIGTERM signal. If the container ignores this signal, the container will be suspended when the **isula stop** command is run. - - -### Example - -Stop a container. - -``` -$ isula stop fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -``` - -## Forcibly Stopping a Container - -### Description - -To forcibly stop one or more running containers, run the **isula kill** command. - -### Usage - -``` -isula kill [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **kill** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

kill

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-s, --signal

-

Signal sent to the container.

-
- -### Example - -Kill a container. - -``` -$ isula kill fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -``` - -## Removing a Container - -### Description - -To remove a container, run the **isula rm** command. - -### Usage - -``` -isula rm [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **rm** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

rm

-

-f, --force

-

Forcibly removes a running container.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-v, --volume

-

Removes a volume mounted to a container. (Note: Currently, iSulad does not use this function.)

-
- -### Constraints - -- In normal I/O scenarios, it takes T1 to delete a running container in an empty environment \(with only one container\). In an environment with 200 containers \(without a large number of I/O operations and with normal host I/O\), it takes T2 to delete a running container. The specification of T2 is as follows: T2 = max \{T1 x 3, 5\}s. - -### Example - -Delete a stopped container. - -``` -$ isula rm fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -``` - -## Attaching to a Container - -### Description - -To attach standard input, standard output, and standard error of the current terminal to a running container, run the **isula attach** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula attach [OPTIONS] CONTAINER -``` - -### Parameters - -The following table lists the parameters supported by the **attach** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

attach

-

--help

-

Displays help information.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-D, --debug

-

Enables the debug mode.

-
- -### Constraints - -- For the native Docker, running the **attach** command will directly enter the container. For the iSulad container, you have to run the **attach** command and press **Enter** to enter the container. - -### Example - -Attach to a running container. - -``` -$ isula attach fd7376591a9c3d8ee9a14f5d2c2e5255b02cc44cddaabca82170efd4497510e1 -/ # -/ # -``` - -## Renaming a Container - -### Description - -To rename a container, run the **isula rename** command. - -### Usage - -``` -isula rename [OPTIONS] OLD_NAME NEW_NAME -``` - -### Parameters - -The following table lists the parameters supported by the **rename** command. - -**Table 1** Parameter description - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

rename

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -### Example - -Rename a container. - -``` -$ isula rename my_container my_new_container -``` - -## Executing a Command in a Running Container - -### Description - -To execute a command in a running container, run the **isula exec** command. This command is executed in the default directory of the container. If a user-defined directory is specified for the basic image, the user-defined directory is used. - -### Usage - -``` -isula exec [OPTIONS] CONTAINER COMMAND [ARG...] -``` - -### Parameters - -The following table lists the parameters supported by the **exec** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

exec

-

  

-

-d, --detach

-

Runs a command in the background.

-

-e, --env

-

Sets environment variables. (Note: Currently, iSulad does not use this function.)

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-i, --interactive

-

Enables the standard input even though no connection is set up. (Note: Currently, iSulad does not use this function.)

-

-t, --tty

-

Allocates a pseudo terminal. (Note: Currently, iSulad does not use this function.)

-

-u, --user

-

Logs in to the container as a specified user.

-
- -### Constraints - -- If no parameter is specified in the **isula exec** command, the **-it** parameter is used by default, indicating that a pseudo terminal is allocated and the container is accessed in interactive mode. -- When you run the **isula exec** command to execute a script and run a background process in the script, you need to use the **nohup** flag to ignore the **SIGHUP** signal. - - When you run the **isula exec** command to execute a script and run a background process in the script, you need to use the **nohup** flag. Otherwise, the kernel sends the **SIGHUP** signal to the process executed in the background when the process \(first process of the session\) exits. As a result, the background process exits and zombie processes occur. - -- After running the **isula exec** command to access the container process, do not run background programs. Otherwise, the system will be suspended. - - To run the **isula exec** command to execute a background process, perform the following steps: - - 1. Run the **isula exec container\_name bash** command to access the container. - 2. After entering the container, run the **script &** command. - 3. Run the **exit** command. The terminal stops responding. - - ``` - After the isula exec command is executed to enter the container, the background program stops responding because the isula exec command is executed to enter the container and run the background while1 program. When the bash command is run to exit the process, the while1 program does not exit and becomes an orphan process, which is taken over by process 1. - The while1 process is executed by the initial bash process fork &exec of the container. The while1 process copies the file handle of the bash process. As a result, the handle is not completely closed when the bash process exits. - The console process cannot receive the handle closing event, epoll_wait stops responding, and the process does not exit. 
- ``` - -- Do not run the **isula exec** command in the background. Otherwise, the system may be suspended. - - Run the **isula exec** command in the background as follows: - - Run the **isula exec script &** command in the background, for example, **isula exec container\_name script &,isula exec**. The command is executed in the background. The script continuously displays a file by running the **cat** command. Normally, there is output on the current terminal. If you press **Enter** on the current terminal, the client exits the stdout read operation due to the I/O read failure. As a result, the terminal does not output data. The server continues to write data to the buffer of the FIFO because the process is still displaying files by running the **cat** command. When the buffer is full, the process in the container is suspended in the write operation. - -- When a lightweight container uses the **exec** command to execute commands with pipe operations, you are advised to run the **/bin/bash -c** command. - - Typical application scenarios: - - Run the **isula exec container\_name -it ls /test | grep "xx" | wc -l** command to count the number of xx files in the test directory. The output is processed by **grep** and **wc** through the pipe because **ls /test** is executed with **exec**. The output of **ls /test** executed by **exec** contains line breaks. When the output is processed, the result is incorrect. - - Cause: Run the **ls /test** command using **exec**. The command output contains a line feed character. Run the** | grep "xx" | wc -l** command for the output. The processing result is 2 \(two lines\). - - ``` - [root@localhost ~]# isula exec -it container ls /test - xx xx10 xx12 xx14 xx3 xx5 xx7 xx9 - xx1 xx11 xx13 xx2 xx4 xx6 xx8 - [root@localhost ~]# - ``` - - Suggestion: When running the **run/exec** command to perform pipe operations, run the **/bin/bash -c** command to perform pipe operations in the container. 
- - ``` - [root@localhost ~]# isula exec -it container /bin/sh -c "ls /test | grep "xx" | wc -l" - 15 - [root@localhost ~]# - ``` - -- Do not use the **echo** option to input data to the standard input of the **exec** command. Otherwise, the client will be suspended. The echo value should be directly transferred to the container as a command line parameter. - - ``` - [root@localhost ~]# echo ls | isula exec 38 /bin/sh - - - ^C - [root@localhost ~]# - ``` - - The client is suspended when the preceding command is executed because the preceding command is equivalent to input **ls** to **stdin**. Then EOF is read and the client does not send data and waits for the server to exit. However, the server cannot determine whether the client needs to continue sending data. As a result, the server is suspended in reading data, and both parties are suspended. - - The correct execution method is as follows: - - ``` - [root@localhost ~]# isula exec 38 ls - bin dev etc home proc root sys tmp usr var - ``` - - -### Example - -Run the echo command in a running container. - -``` -$ isula exec c75284634bee echo "hello,world" -hello,world -``` - -## Querying Information About a Single Container - -### Description - -To query information about a single container, run the **isula inspect** command. - -### Usage - -``` -isula inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] -``` - -### Parameters - -The following table lists the parameters supported by the **inspect** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

inspect

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-f, --format

-

Output format.

-

-t, --time

-

Timeout interval, in seconds. If the inspect command fails to query container information within the specified period, the system stops waiting and reports an error immediately. The default value is 120s. If the value is less than or equal to 0, the inspect command keeps waiting until the container information is obtained successfully.

-
- -### Constraints - -- Lightweight containers do not support the output in \{ \{.State\} \} format but support the output in the \{ \{json .State\} \} format. The **-f** parameter is not supported when the object is an image. - -### Example - -Query information about a container. - -``` -$ isula inspect c75284634bee -[ - { - "Id": "c75284634beeede3ab86c828790b439d16b6ed8a537550456b1f94eb852c1c0a", - "Created": "2019-08-01T22:48:13.993304927-04:00", - "Path": "sh", - "Args": [], - "State": { - "Status": "running", - "Running": true, - "Paused": false, - "Restarting": false, - "Pid": 21164, - "ExitCode": 0, - "Error": "", - "StartedAt": "2019-08-02T06:09:25.535049168-04:00", - "FinishedAt": "2019-08-02T04:28:09.479766839-04:00", - "Health": { - "Status": "", - "FailingStreak": 0, - "Log": [] - } - }, - "Image": "busybox", - "ResolvConfPath": "", - "HostnamePath": "", - "HostsPath": "", - "LogPath": "none", - "Name": "c75284634beeede3ab86c828790b439d16b6ed8a537550456b1f94eb852c1c0a", - "RestartCount": 0, - "HostConfig": { - "Binds": [], - "NetworkMode": "", - "GroupAdd": [], - "IpcMode": "", - "PidMode": "", - "Privileged": false, - "SystemContainer": false, - "NsChangeFiles": [], - "UserRemap": "", - "ShmSize": 67108864, - "AutoRemove": false, - "AutoRemoveBak": false, - "ReadonlyRootfs": false, - "UTSMode": "", - "UsernsMode": "", - "Sysctls": {}, - "Runtime": "lcr", - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "CapAdd": [], - "CapDrop": [], - "Dns": [], - "DnsOptions": [], - "DnsSearch": [], - "ExtraHosts": [], - "HookSpec": "", - "CPUShares": 0, - "Memory": 0, - "OomScoreAdj": 0, - "BlkioWeight": 0, - "BlkioWeightDevice": [], - "CPUPeriod": 0, - "CPUQuota": 0, - "CPURealtimePeriod": 0, - "CPURealtimeRuntime": 0, - "CpusetCpus": "", - "CpusetMems": "", - "SecurityOpt": [], - "StorageOpt": {}, - "KernelMemory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "OomKillDisable": false, - "PidsLimit": 0, - "FilesLimit": 0, - "Ulimits": [], - 
"Hugetlbs": [], - "HostChannel": { - "PathOnHost": "", - "PathInContainer": "", - "Permissions": "", - "Size": 0 - }, - "EnvTargetFile": "", - "ExternalRootfs": "" - }, - "Mounts": [], - "Config": { - "Hostname": "localhost", - "User": "", - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "TERM=xterm", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Tty": true, - "Cmd": [ - "sh" - ], - "Entrypoint": [], - "Labels": {}, - "Annotations": { - "log.console.file": "none", - "log.console.filerotate": "7", - "log.console.filesize": "1MB", - "rootfs.mount": "/var/lib/isulad/mnt/rootfs", - "native.umask": "secure" - }, - "HealthCheck": { - "Test": [], - "Interval": 0, - "Timeout": 0, - "StartPeriod": 0, - "Retries": 0, - "ExitOnUnhealthy": false - } - }, - "NetworkSettings": { - "IPAddress": "" - } - } -] -``` - -## Querying Information About All Containers - -### Description - -To query information about all containers, run the **isula ps** command. - -### Usage - -``` -isula ps [OPTIONS] -``` - -### Parameters - -The following table lists the parameters supported by the **ps** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

ps

-

  

-

  

-

  

-

  

-

-a, --all

-

Displays all containers.

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-q, --quiet

-

Displays only the container name.

-

-f, --filter

-

Adds filter criteria.

-

--format

-

Output format.

-

--no-trunc

-

Does not truncate the container ID.

-
- -### Example - -Query information about all containers. - -``` -$ isula ps -a - -ID IMAGE STATUS PID COMMAND EXIT_CODE RESTART_COUNT STARTAT FINISHAT RUNTIME NAMES -e84660aa059c rnd-dockerhub.huawei.com/official/busybox running 304765 "sh" 0 0 13 minutes ago - lcr e84660aa059cafb0a77a4002e65cc9186949132b8e57b7f4d76aa22f28fde016 -$ isula ps -a --format "table {{.ID}} {{.Image}}" --no-trunc -ID IMAGE -e84660aa059cafb0a77a4002e65cc9186949132b8e57b7f4d76aa22f28fde016 rnd-dockerhub.huawei.com/official/busybox - -``` - -## Restarting a Container - -### Description - -To restart one or more containers, run the **isula restart** command. - -### Usage - -``` -isula restart [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **restart** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

restart

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-t, --time

-

Time for graceful stop. If the time exceeds the value of this parameter, the container is forcibly stopped.

-
- -### Constraints - -- If the **t** parameter is specified and the value of **t** is less than 0, ensure that the application in the container can process the stop signal. - - The restart command first calls the stop command to stop the container. Send the SIGTERM signal to the container, and then wait for a period of time \(**t** entered by the user\). If the container is still running after the period of time, the SIGKILL signal is sent to forcibly kill the container. - -- The meaning of the input parameter **t** is as follows: - - **t** < 0: Wait for graceful stop. This setting is preferred when users are assured that their applications have a proper stop signal processing mechanism. - - **t** = 0: Do not wait and send **kill -9** to the container immediately. - - **t** \> 0: Wait for a specified period and send **kill -9** to the container if the container does not stop within the specified period. - - Therefore, if **t** is set to a value less than 0 \(for example, **t** = -1\), ensure that the container application correctly processes the SIGTERM signal. If the container ignores this signal, the container will be suspended when the **isula stop** command is run. - - -### Example - -Restart a container. - -``` -$ isula restart c75284634beeede3ab86c828790b439d16b6ed8a537550456b1f94eb852c1c0a - c75284634beeede3ab86c828790b439d16b6ed8a537550456b1f94eb852c1c0a -``` - -## Waiting for a Container to Exit - -### Description - -To wait for one or more containers to exit, run the **isula wait** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula wait [OPTIONS] CONTAINER [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **wait** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

wait

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

/

-

Blocks until the container stops and displays the exit code.

-
- -### Example - -Wait for a single container to exit. - -``` -$ isula wait c75284634beeede3ab86c828790b439d16b6ed8a537550456b1f94eb852c1c0a - 137 -``` - -## Viewing Process Information in a Container - -### Description - -To view process information in a container, run the **isula top** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula top [OPTIONS] container [ps options] -``` - -### Parameters - -The following table lists the parameters supported by the **top** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

top

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

/

-

Queries the process information of a running container.

-
- -### Example - -Query process information in a container. - -``` -$ isula top 21fac8bb9ea8e0be4313c8acea765c8b4798b7d06e043bbab99fc20efa72629c -UID PID PPID C STIME TTY TIME CMD -root 22166 22163 0 23:04 pts/1 00:00:00 sh -``` - -## Displaying Resource Usage Statistics of a Container - -### Description - -To display resource usage statistics in real time, run the **isula stats** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula stats [OPTIONS] [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **stats** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

stats

-

  

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-a, --all

-

Displays all containers. (By default, only running containers are displayed.)

-

--no-stream

-

Displays the first result only. Only statistics in non-stream mode are displayed.

-
- -### Example - -Display resource usage statistics. - -``` -$ isula stats --no-stream 21fac8bb9ea8e0be4313c8acea765c8b4798b7d06e043bbab99fc20efa72629c CONTAINER CPU % MEM USAGE / LIMIT MEM % BLOCK I / O PIDS -21fac8bb9ea8 0.00 56.00 KiB / 7.45 GiB 0.00 0.00 B / 0.00 B 1 -``` - -## Obtaining Container Logs - -### Description - -To obtain container logs, run the **isula logs** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula logs [OPTIONS] [CONTAINER...] -``` - -### Parameters - -The following table lists the parameters supported by the **logs** command. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

logs

-

  

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-f, --follow

-

Traces log output.

-

--tail

-

Specifies the number of latest log records to display.

-
- -### Constraints - -- By default, the container log function is enabled. To disable this function, run the **isula create --log-opt disable-log=true** or **isula run --log-opt disable-log=true** command. - -### Example - -Obtain container logs. - -``` -$ isula logs 6a144695f5dae81e22700a8a78fac28b19f8bf40e8827568b3329c7d4f742406 -hello, world -hello, world -hello, world -``` - -## Copying Data Between a Container and a Host - -### Description - -To copy data between a host and a container, run the **isula cp** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH -isula cp [OPTIONS] SRC_PATH CONTAINER:DEST_PATH -``` - -### Parameters - -The following table lists the parameters supported by the **cp** command. - -**Table 1** Parameter description - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

cp

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -### Constraints - -- When iSulad copies files, note that the **/etc/hostname**, **/etc/resolv.conf**, and **/etc/hosts** files are not mounted to the host, neither the **--volume** and **--mount** parameters. Therefore, the original files in the image instead of the files in the real container are copied. - - ``` - [root@localhost tmp]# isula cp b330e9be717a:/etc/hostname /tmp/hostname - [root@localhost tmp]# cat /tmp/hostname - [root@localhost tmp]# - ``` - -- When decompressing a file, iSulad does not check the type of the file or folder to be overwritten in the file system. Instead, iSulad directly overwrites the file or folder. Therefore, if the source is a folder, the file with the same name is forcibly overwritten as a folder. If the source file is a file, the folder with the same name will be forcibly overwritten as a file. - - ``` - [root@localhost tmp]# rm -rf /tmp/test_file_to_dir && mkdir /tmp/test_file_to_dir - [root@localhost tmp]# isula exec b330e9be717a /bin/sh -c "rm -rf /tmp/test_file_to_dir && touch /tmp/test_file_to_dir" - [root@localhost tmp]# isula cp b330e9be717a:/tmp/test_file_to_dir /tmp - [root@localhost tmp]# ls -al /tmp | grep test_file_to_dir - -rw-r----- 1 root root 0 Apr 26 09:59 test_file_to_dir - ``` - - -- iSulad freezes the container during the copy process and restores the container after the copy is complete. - -### Example - -Copy the **/test/host** directory on the host to the **/test** directory on container 21fac8bb9ea8. - -``` -isula cp /test/host 21fac8bb9ea8:/test -``` - -Copy the **/www** directory on container 21fac8bb9ea8 to the **/tmp** directory on the host. - -``` -isula cp 21fac8bb9ea8:/www /tmp/ -``` - -## Pausing a Container - -### Description - -To pause all processes in a container, run the **isula pause** command. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula pause CONTAINER [CONTAINER...] -``` - -### Parameters - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

pause

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -### Constraints - -- Only containers in the running state can be paused. -- After a container is paused, other lifecycle management operations \(such as **restart**, **exec**, **attach**, **kill**, **stop**, and **rm**\) cannot be performed. -- After a container with health check configurations is paused, the container status changes to unhealthy. - -### Example - -Pause a running container. - -``` -$ isula pause 8fe25506fb5883b74c2457f453a960d1ae27a24ee45cdd78fb7426d2022a8bac - 8fe25506fb5883b74c2457f453a960d1ae27a24ee45cdd78fb7426d2022a8bac -``` - -## Resuming a Container - -### Description - -To resume all processes in a container, run the **isula unpause** command. It is the reverse process of **isula pause**. Only containers whose runtime is of the LCR type are supported. - -### Usage - -``` -isula unpause CONTAINER [CONTAINER...] -``` - -### Parameters - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

unpause

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-
- -### Constraints - -- Only containers in the paused state can be unpaused. - -### Example - -Resume a paused container. - -``` -$ isula unpause 8fe25506fb5883b74c2457f453a960d1ae27a24ee45cdd78fb7426d2022a8bac - 8fe25506fb5883b74c2457f453a960d1ae27a24ee45cdd78fb7426d2022a8bac -``` - -## Obtaining Event Messages from the Server in Real Time - -### **Description** - -The **isula events** command is used to obtain event messages such as container image lifecycle and running event from the server in real time. Only containers whose runtime type is **lcr** are supported. - -### Usage - -``` -isula events [OPTIONS] -``` - -### Parameter - - - - - - - - - - - - - - - - - - -

Command

-

Parameter

-

Description

-

events

-

-H, --host

-

Specifies the iSulad socket file path to be accessed.

-

-n, --name

-

Obtains event messages of a specified container.

-

-S, --since

-

Obtains event messages generated since a specified time.

-
- -### Example - -Run the following command to obtain event messages from the server in real time: - -``` -$ isula events -``` - diff --git a/docs/en/docs/Container/container-resource-management.md b/docs/en/docs/Container/container-resource-management.md deleted file mode 100644 index 3b7166202e7354619bb61d14e55eaa1d0386e9c3..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container-resource-management.md +++ /dev/null @@ -1,731 +0,0 @@ -# Container Resource Management - -- [Container Resource Management](#container-resource-management) - - [Sharing Resources](#sharing-resources) - - [Restricting CPU Resources of a Running Container](#restricting-cpu-resources-of-a-running-container) - - [Restricting the Memory Usage of a Running Container](#restricting-the-memory-usage-of-a-running-container) - - [Restricting I/O Resources of a Running Container](#restricting-i-o-resources-of-a-running-container) - - [Restricting the Rootfs Storage Space of a Container](#restricting-the-rootfs-storage-space-of-a-container) - - [Restricting the Number of File Handles in a Container](#restricting-the-number-of-file-handles-in-a-container) - - [Restricting the Number of Processes or Threads that Can Be Created in a Container](#restricting-the-number-of-processes-or-threads-that-can-be-created-in-a-container) - - [Configuring the ulimit Value in a Container](#configuring-the-ulimit-value-in-a-container) - - - - - -## Sharing Resources - -### Description - -Containers or containers and hosts can share namespace information mutually, including PID, network, IPC, and UTS information. - -### Usage - -When running the **isula create/run** command, you can set the namespace parameters to share resources. For details, see the following parameter description table. - -### Parameters - -You can specify the following parameters when running the **lcrc create/run** command: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--pid

-

Specifies the PID namespace to be shared.

-

[none, host, container:<containerID>]: none indicates that the namespace is not shared. host indicates that the namespace is shared with the host. container:<containerID> indicates that the namespace is shared with the container.

-

No

-

--net

-

Specifies the network namespace to be shared.

-

[none, host, container:<containerID>]: none indicates that the namespace is not shared. host indicates that the namespace is shared with the host. container:<containerID> indicates that the namespace is shared with the container.

-

No

-

--ipc

-

Specifies the IPC namespace to be shared.

-

[none, host, container:<containerID>]: none indicates that the namespace is not shared. host indicates that the namespace is shared with the host. container:<containerID> indicates that the namespace is shared with the container.

-

No

-

--uts

-

Specifies the UTS namespace to be shared.

-

[none, host, container:<containerID>]: none indicates that the namespace is not shared. host indicates that the namespace is shared with the host. container:<containerID> indicates that the namespace is shared with the container.

-

No

-
- -### Example - -If two containers need to share the same PID namespace, add **--pid container:** when running the container. For example: - -``` -isula run -tid --name test_pid busybox sh -isula run -tid --name test --pid container:test_pid busybox sh -``` - -## Restricting CPU Resources of a Running Container - -### Description - -You can set parameters to restrict the CPU resources of a container. - -### Usage - -When running the **isula create/run** command, you can set CPU-related parameters to limit the CPU resources of a container. For details about the parameters and values, see the following table. - -### Parameters - -You can specify the following parameters when running the **isula create/run** command: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--cpu-period

-

Limits the CPU CFS period in a container.

-

64-bit integer

-

No

-

--cpu-quota

-

Limits the CPU CFS quota.

-

64-bit integer

-

No

-

--cpu-shares

-

Limits the CPU share (relative weight).

-

64-bit integer

-

No

-

--cpuset-cpus

-

Limits the CPU nodes.

-

A character string specifying the CPUs the container can use, for example, 0-3 or 0,1.

-

No

-

--cpuset-mems

-

Limits the memory nodes used by cpuset in the container.

-

A character string specifying the memory nodes the container can use, for example, 0-3 or 0,1.

-

No

-
- -### Example - -To restrict a container to use a specific CPU, add **--cpuset-cpus number** when running the container. For example: - -``` -isula run -tid --cpuset-cpus 0,2-3 busybox sh -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->You can check whether the configuration is successful. For details, see "Querying Information About a Single Container." - -## Restricting the Memory Usage of a Running Container - -### Description - -You can set parameters to restrict the memory usage of a container. - -### Usage - -When running the **isula create/run** command, you can set memory-related parameters to restrict memory usage of containers. For details about the parameters and values, see the following table. - -### Parameters - -You can specify the following parameters when running the **isula create/run** command: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--memory

-

Specifies the upper limit of the memory usage of a container.

-

64-bit integer The value is a non-negative number. The value 0 indicates that no limit is set. The unit can be empty (byte), KB, MB, GB, TB, or PB.

-

No

-

--memory-reservation

-

Specifies the soft upper limit of the memory of a container.

-

64-bit integer The value is a non-negative number. The value 0 indicates that no limit is set. The unit can be empty (byte), KB, MB, GB, TB, or PB.

-

No

-

--memory-swap

-

Specifies the upper limit of the swap memory of the container.

-

64-bit integer The value can be -1 or a non-negative number. The value -1 indicates no limit, and the value 0 indicates that no limit is set. The unit can be empty (byte), KB, MB, GB, TB, or PB.

-

No

-

--kernel-memory

-

Specifies the upper limit of the kernel memory of the container.

-

64-bit integer The value is a non-negative number. The value 0 indicates that no limit is set. The unit can be empty (byte), KB, MB, GB, TB, or PB.

-

No

-
- -### Example - -To set the upper limit of the memory of a container, add **--memory \[\]** when running the container. For example: - -``` -isula run -tid --memory 1G busybox sh -``` - -## Restricting I/O Resources of a Running Container - -### Description - -You can set parameters to limit the read/write speed of devices in the container. - -### Usage - -When running the **isula create/run** command, you can set **--device-read-bps/--device-write-bps :\[\]** to limit the read/write speed of devices in the container. - -### Parameters - -When running the **isula create/run** command, set **--device-read/write-bps**. - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--device-read-bps/--device-write-bps

-

Limits the read/write speed of devices in the container.

-

64-bit integer The value is a positive integer. The value can be 0, indicating that no limit is set. The unit can be empty (byte), KB, MB, GB, TB, or PB.

-

No

-
- -### Example - -To limit the read/write speed of devices in the container, add **--device-write-bps/--device-read-bps :\[\]** when running the container. For example, to limit the read speed of the device **/dev/sda** in the container **busybox** to 1 Mbit/s, run the following command: - -``` -isula run -tid --device-write /dev/sda:1mb busybox sh -``` - -To limit the write speed, run the following command: - -``` -isula run -tid read-bps /dev/sda:1mb busybox sh -``` - -## Restricting the Rootfs Storage Space of a Container - -### Description - -When the overlay2 storage driver is used on the EXT4 file system, the file system quota of a single container can be set. For example, the quota of container A is set to 5 GB, and the quota of container B is set to 10 GB. - -This feature is implemented by the project quota function of the EXT4 file system. If the kernel supports this function, use the syscall SYS\_IOCTL to set the project ID of a directory, and then use the syscall SYS\_QUOTACTL to set the hard limit and soft limit of the corresponding project ID. - -### Usage - -1. Prepare the environment. - - Ensure that the file system supports the **Project ID** and **Project Quota** attributes, the kernel version is 4.19 or later, and the version of the peripheral package e2fsprogs is 1.43.4-2 or later. - -2. Before mounting overlayfs to a container, set different project IDs for the upper and work directories of different containers and set inheritance options. After overlayfs is mounted to a container, the project IDs and inherited attributes cannot be modified. -3. Set the quota as a privileged user outside the container. -4. Add the following configuration to daemon: - - ``` - -s overlay2 --storage-opt overlay2.override_kernel_check=true - ``` - -5. Daemon supports the following options for setting default restrictions for containers: - - **--storage-opt overlay2.basesize=128M** specifies the default limit. 
If **--storage-opt size** is also specified when you run the **isula run** command, the value of this parameter takes effect. If no size is specified during the daemon process or when you run the **isula run** command, the size is not limited. - -6. Enable the **Project ID** and **Project Quota** attributes of the file system. - - Format and mount the file system. - - ``` - # mkfs.ext4 -O quota,project /dev/sdb - # mount -o prjquota /dev/sdb /var/lib/isulad - ``` - - - -### Parameters - -When running the **create/run** command, set **--storage-opt**. - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--storage-opt size=${rootfsSize}

-

Restricts the root file system (rootfs) storage space of the container.

-

The size parsed by rootfsSize is a positive 64-bit integer expressed in bytes. You can also set it to ([kKmMgGtTpP])?[iI]?[bB]?$.

-

No

-
- -### Example - -In the **isula run/create** command, use the existing parameter **--storage-opt size=**_value_ to set the quota. The value is a positive number in the unit of **\[kKmMgGtTpP\]?\[iI\]?\[bB\]?**. If the value does not contain a unit, the default unit is byte. - -``` -$ [root@localhost ~]# isula run -ti --storage-opt size=10M busybox -/ # df -h -Filesystem Size Used Available Use% Mounted on -overlay 10.0M 48.0K 10.0M 0% / -none 64.0M 0 64.0M 0% /dev -none 10.0M 0 10.0M 0% /sys/fs/cgroup -tmpfs 64.0M 0 64.0M 0% /dev -shm 64.0M 0 64.0M 0% /dev/shm -/dev/mapper/vg--data-ext41 - 9.8G 51.5M 9.2G 1% /etc/hostname -/dev/mapper/vg--data-ext41 - 9.8G 51.5M 9.2G 1% /etc/resolv.conf -/dev/mapper/vg--data-ext41 - 9.8G 51.5M 9.2G 1% /etc/hosts -tmpfs 3.9G 0 3.9G 0% /proc/acpi -tmpfs 64.0M 0 64.0M 0% /proc/kcore -tmpfs 64.0M 0 64.0M 0% /proc/keys -tmpfs 64.0M 0 64.0M 0% /proc/timer_list -tmpfs 64.0M 0 64.0M 0% /proc/sched_debug -tmpfs 3.9G 0 3.9G 0% /proc/scsi -tmpfs 64.0M 0 64.0M 0% /proc/fdthreshold -tmpfs 64.0M 0 64.0M 0% /proc/fdenable -tmpfs 3.9G 0 3.9G 0% /sys/firmware -/ # -/ # dd if=/dev/zero of=/home/img bs=1M count=12 && sync -dm-4: write failed, project block limit reached. -10+0 records in -9+0 records out -10432512 bytes (9.9MB) copied, 0.011782 seconds, 844.4MB/s -/ # df -h | grep overlay -overlay 10.0M 10.0M 0 100% / -/ # -``` - -### Constraints - -1. The quota applies only to the rw layer. - - The quota of overlay2 is for the rw layer of the container. The image size is not included. - -2. The kernel supports and enables this function. - - The kernel must support the EXT4 project quota function. When running **mkfs**, add **-O quota,project**. When mounting the file system, add **-o prjquota**. If any of the preceding conditions is not met, an error is reported when **--storage-opt size=**_value_ is used. 
- - ``` - $ [root@localhost ~]# isula run -it --storage-opt size=10Mb busybox df -h - Error response from daemon: Failed to prepare rootfs with error: time="2019-04-09T05:13:52-04:00" level=fatal msg="error creating read- - write layer with ID "a4c0e55e82c55e4ee4b0f4ee07f80cc2261cf31b2c2dfd628fa1fb00db97270f": --storage-opt is supported only for overlay over - xfs or ext4 with 'pquota' mount option" - ``` - -3. Description of the limit of quota: - 1. If the quota is greater than the size of the partition where user **root** of iSulad is located, the file system quota displayed by running the **df** command in the container is the size of the partition where user **root** of iSulad is located, not the specified quota. - 2. **--storage-opt size=0** indicates that the size is not limited and the value cannot be less than 4096. The precision of size is one byte. If the specified precision contains decimal bytes, the decimal part is ignored. For example, if size is set to **0.1**, the size is not limited. \(The value is restricted by the precision of the floating point number stored on the computer. That is, 0.999999999999999999999999999 is equal to 1. The number of digits 9 may vary according to computers. Therefore, 4095.999999999999999999999999999 is equal to 4096.\) Note that running **isula inspect** displays the original command line specified format. If the value contains decimal bytes, you need to ignore the decimal part. - 3. If the quota is too small, for example,** --storage-opt size=4k**, the container may fail to be started because some files need to be created for starting the container. - 4. The **-o prjquota** option is added to the root partition of iSulad when iSulad is started last time. If this option is not added during this startup, the setting of the container with quota created during the last startup does not take effect. - 5. The value range of the daemon quota **--storage-opt overlay2.basesize** is the same as that of **--storage-opt size**. 
- -4. When **storage-opt** is set to 4 KB, the lightweight container startup is different from that of Docker. - - Use the **storage-opt size=4k** and image **rnd-dockerhub.huawei.com/official/ubuntu-arm64:latest** to run the container. - - Docker fails to be started. - - ``` - [root@localhost ~]# docker run -itd --storage-opt size=4k rnd-dockerhub.huawei.com/official/ubuntu-arm64:latest - docker: Error response from daemon: symlink /proc/mounts /var/lib/docker/overlay2/e6e12701db1a488636c881b44109a807e187b8db51a50015db34a131294fcf70-init/merged/etc/mtab: disk quota exceeded. - See 'docker run --help'. - ``` - - The lightweight container is started properly and no error is reported. - - ``` - [root@localhost ~]# isula run -itd --storage-opt size=4k rnd-dockerhub.huawei.com/official/ubuntu-arm64:latest - 636480b1fc2cf8ac895f46e77d86439fe2b359a1ff78486ae81c18d089bbd728 - [root@localhost ~]# isula ps - STATUS PID IMAGE COMMAND EXIT_CODE RESTART_COUNT STARTAT FINISHAT RUNTIME ID NAMES - running 17609 rnd-dockerhub.huawei.com/official/ubuntu-arm64:latest /bin/bash 0 0 2 seconds ago - lcr 636480b1fc2c 636480b1fc2cf8ac895f46e77d86439fe2b359a1ff78486ae81c18d089bbd728 - ``` - - During container startup, if you need to create a file in the **rootfs** directory of the container, the image size exceeds 4 KB, and the quota is set to 4 KB, the file creation will fail. - - When Docker starts the container, it creates more mount points than iSulad to mount some directories on the host to the container, such as **/proc/mounts** and **/dev/shm**. If these files do not exist in the image, the creation will fail, therefore, the container fails to be started. - - When a lightweight container uses the default configuration during container startup, there are few mount points. The lightweight container is created only when the directory like **/proc** or **/sys** does not exist. 
The image **rnd-dockerhub.huawei.com/official/ubuntu-arm64:latest** in the test case contains **/proc** and **/sys**. Therefore, no new file or directory is generated during the container startup. As a result, no error is reported during the lightweight container startup. To verify this process, when the image is replaced with **rnd-dockerhub.huawei.com/official/busybox-aarch64:latest**, an error is reported when the lightweight container is started because **/proc** does not exist in the image. - - ``` - [root@localhost ~]# isula run -itd --storage-opt size=4k rnd-dockerhub.huawei.com/official/busybox-aarch64:latest - 8e893ab483310350b8caa3b29eca7cd3c94eae55b48bfc82b350b30b17a0aaf4 - Error response from daemon: Start container error: runtime error: 8e893ab483310350b8caa3b29eca7cd3c94eae55b48bfc82b350b30b17a0aaf4:tools/lxc_start.c:main:404 starting container process caused "Failed to setup lxc, - please check the config file." - ``` - -5. Other description: - - When using iSulad with the quota function to switch data disks, ensure that the data disks to be switched are mounted using the **prjquota** option and the mounting mode of the **/var/lib/isulad/storage/overlay2** directory is the same as that of the **/var/lib/isulad** directory. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Before switching the data disk, ensure that the mount point of **/var/lib/isulad/storage/overlay2** is unmounted. - - -## Restricting the Number of File Handles in a Container - -### Description - -You can set parameters to limit the number of file handles that can be opened in a container. - -### Usage - -When running the **isula create/run** command, set the **--files-limit** parameter to limit the number of file handles that can be opened in a container. - -### Parameters - -Set the **--files-limit** parameter when running the **isula create/run** command. - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--files-limit

-

Limits the number of file handles that can be opened in a container.

-

64-bit integer The value can be 0 or a negative number, but cannot be greater than 2 to the power of 63 minus 1. The value 0 or a negative number indicates no limit.

-

During container creation, some handles are opened temporarily. Therefore, the value cannot be too small. Otherwise, the container may not be restricted by the file limit. If the value is less than the number of opened handles, the cgroup file cannot be written. It is recommended that the value be greater than 30.

-

No

-
- -### Example - -When running the container, add **--files-limit n**. For example: - -``` -isula run -ti --files-limit 1024 busybox bash -``` - -### Constraints - -1. If the **--files-limit** parameter is set to a small value, for example, 1, the container may fail to be started. - - ``` - [root@localhost ~]# isula run -itd --files-limit 1 rnd-dockerhub.huawei.com/official/busybox-aarch64 - 004858d9f9ef429b624f3d20f8ba12acfbc8a15bb121c4036de4e5745932eff4 - Error response from daemon: Start container error: Container is not running:004858d9f9ef429b624f3d20f8ba12acfbc8a15bb121c4036de4e5745932eff4 - ``` - - Docker will be started successfully, and the value of **files.limit cgroup** is **max**. - - ``` - [root@localhost ~]# docker run -itd --files-limit 1 rnd-dockerhub.huawei.com/official/busybox-aarch64 - ef9694bf4d8e803a1c7de5c17f5d829db409e41a530a245edc2e5367708dbbab - [root@localhost ~]# docker exec -it ef96 cat /sys/fs/cgroup/files/files.limit - max - ``` - - The root cause is that the startup principles of the lxc and runc processes are different. After the lxc process creates the cgroup, the files.limit value is set, and then the PID of the container process is written into the cgroup.procs file of the cgroup. At this time, the process has opened more than one handle. As a result, an error is reported, and the startup fails. After you create a cgroup by running the **runc** command, the PID of the container process is written to the cgroup.procs file of the cgroup, and then the files.limit value is set. Because more than one handle is opened by the process in the cgroup, the file.limit value does not take effect, the kernel does not report any error, and the container is started successfully. - - -## Restricting the Number of Processes or Threads that Can Be Created in a Container - -### Description - -You can set parameters to limit the number of processes or threads that can be created in a container. 
- -### Usage - -When creating or running a container, use the **--pids-limit** parameter to limit the number of processes or threads that can be created in the container. - -### Parameters - -When running the **create/run** command, set the **--pids-limit** parameter. - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--pids-limit

-

Limits the number of processes or threads that can be created in a container.

-

64-bit integer The value can be 0 or a negative number, but cannot be greater than 2 to the power of 63 minus 1. The value 0 or a negative number indicates no limit.

-

No

-
- -### Example - -When running the container, add **--pids-limit n**. For example: - -``` -isula run -ti --pids-limit 1024 busybox bash -``` - -### Constraints - -During container creation, some processes are created temporarily. Therefore, the value cannot be too small. Otherwise, the container may fail to be started. It is recommended that the value be greater than 10. - -## Configuring the ulimit Value in a Container - -### Description - -You can use parameters to control the resources for executed programs. - -### Usage - -Set the **--ulimit** parameter when creating or running a container, or configure the parameter on the daemon to control the resources for executed programs in the container. - -### Parameters - -Use either of the following methods to configure ulimit: - -1. When running the **isula create/run** command, use **--ulimit =\[:\]** to control the resources of the executed shell program. - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value Range

-

Mandatory or Not

-

--ulimit

-

Limits the resources of the executed shell program.

-

64-bit integer The value of the soft limit must be less than or equal to that of the hard limit. If only the soft limit is specified, the value of the hard limit is equal to that of the soft limit. Some types of resources do not support negative numbers. For details, see the following table.

-

No

-
- -2. Use daemon parameters or configuration files. - - For details, see --default-ulimits in [Deployment Mode](#deployment-mode.md#EN-US_TOPIC_0184808043). - - **--ulimit** can limit the following types of resources: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Type

-

Description

-

Value Range

-

core

-

limits the core file size (KB)

-

64-bit integer, without unit. The value can be 0 or a negative number. The value -1 indicates no limit. Other negative numbers are forcibly converted into a large positive integer.

-

cpu

-

max CPU time (minutes)

-

data

-

max data size (KB)

-

fsize

-

maximum filesize (KB)

-

locks

-

max number of file locks the user can hold

-

memlock

-

max locked-in-memory address space (KB)

-

msgqueue

-

max memory used by POSIX message queues (bytes)

-

nice

-

nice priority

-

nproc

-

max number of processes

-

rss

-

max resident set size (KB)

-

rtprio

-

max realtime priority

-

rttime

-

realtime timeout

-

sigpending

-

max number of pending signals

-

stack

-

max stack size (KB)

-

nofile

-

max number of open file descriptors

-

64-bit integer, without unit. The value cannot be negative. A negative number is forcibly converted to a large positive number. In addition, "Operation not permitted" is displayed during the setting.

-
- - -### Example - -When creating or running a container, add **--ulimit =\[:\]**. For example: - -``` -isula create/run -tid --ulimit nofile=1024:2048 busybox sh -``` - -### Constraints - -The ulimit cannot be configured in the **daemon.json** and **/etc/sysconfig/iSulad** files \(or the iSulad command line\). Otherwise, an error is reported when iSulad is started. - diff --git a/docs/en/docs/Container/container.md b/docs/en/docs/Container/container.md deleted file mode 100644 index 60c6b10a0f4f4e7ee909db6cc161b8d030f3419c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/container.md +++ /dev/null @@ -1,24 +0,0 @@ -# Container User Guide - -## Overview - -The openEuler software package provides iSula, the basic platform for running containers. - -iSula is a brand of Huawei's container technology solution. It originally means a kind of ant. This ant is also known as "bullet ant" due to the extremely painful sting, which has been compared to being shot by a bullet. In the eyes of Brazilian natives living in the Amazon jungle in Central and South America, iSula is one of the most powerful insects in the world. Huawei names the container technology solution brand based on its meaning. - -The basic container platform iSula provides both Docker engine and lightweight container engine iSulad. You can select either of them as required. - -In addition, the following container forms are provided on different application scenarios: - -- Common containers applicable to most common scenarios -- Secure containers applicable to strong isolation and multi-tenant scenarios -- System containers applicable to scenarios where the systemd is used to manage services - -This document describes how to install and use the container engines and how to deploy and use containers in different forms. - -## Intended Audience - -This document is intended for openEuler users who need to install containers. 
You can better understand this document if you: - -- Be familiar with basic Linux operations. -- Have a basic understanding of containers. diff --git a/docs/en/docs/Container/cri.md b/docs/en/docs/Container/cri.md deleted file mode 100644 index cd412f0664db94d13f0fcbe4e3c9cd08245022cf..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/cri.md +++ /dev/null @@ -1,2911 +0,0 @@ -# CRI - -- [CRI](#cri) - - [Description](#description) - - [APIs](#apis) - - [Runtime Service](#runtime-service) - - [RunPodSandbox](#runpodsandbox) - - [StopPodSandbox](#stoppodsandbox) - - [RemovePodSandbox](#removepodsandbox) - - [PodSandboxStatus](#podsandboxstatus) - - [ListPodSandbox](#listpodsandbox) - - [CreateContainer](#createcontainer) - - [StartContainer](#startcontainer) - - [StopContainer](#stopcontainer) - - [RemoveContainer](#removecontainer) - - [ListContainers](#listcontainers) - - [ContainerStatus](#containerstatus) - - [UpdateContainerResources](#updatecontainerresources) - - [ExecSync](#execsync) - - [Exec](#exec) - - [Attach](#attach) - - [ContainerStats](#containerstats) - - [ListContainerStats](#listcontainerstats) - - [UpdateRuntimeConfig](#updateruntimeconfig) - - [Status](#status) - - [Image Service](#image-service) - - [ListImages](#listimages) - - [ImageStatus](#imagestatus) - - [PullImage](#pullimage) - - [RemoveImage](#removeimage) - - [ImageFsInfo](#imagefsinfo) - - [Constraints](#constraints-2) - - -## Description - -The Container Runtime Interface \(CRI\) provided by Kubernetes defines container and image service APIs. iSulad uses the CRI to interconnect with Kubernetes. - -Since the container runtime is isolated from the image lifecycle, two services need to be defined. This API is defined by using [Protocol Buffer](https://developers.google.com/protocol-buffers/) based on [gRPC](https://grpc.io/). - -The current CRI version is v1alpha1. 
For official API description, access the following link: - -[https://github.com/kubernetes/kubernetes/blob/release-1.14/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto](https://github.com/kubernetes/kubernetes/blob/release-1.14/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto) - -iSulad uses the API description file of version 1.14 used by Pass, which is slightly different from the official API description file. API description in this document prevails. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The listening IP address of the CRI WebSocket streaming service is **127.0.0.1** and the port number is **10350**. The port number can be configured in the **--websocket-server-listening-port** command or in the **daemon.json** configuration file. - -## APIs - -The following tables list the parameters that may be used in each API. Some parameters do not take effect now, which have been noted in the corresponding parameter description. - -### API Parameters - -- **DNSConfig** - - The API is used to configure DNS servers and search domains of a sandbox. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

repeated string servers

-

DNS server list of a cluster.

-

repeated string searches

-

DNS search domain list of a cluster.

-

repeated string options

-

DNS option list. For details, see https://linux.die.net/man/5/resolv.conf.

-
- -- **Protocol** - - The API is used to specify enum values of protocols. - - - - - - - - - - - - -

Parameter

-

Description

-

TCP = 0

-

Transmission Control Protocol (TCP).

-

UDP = 1

-

User Datagram Protocol (UDP).

-
- -- **PortMapping** - - The API is used to configure the port mapping for a sandbox. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Protocol protocol

-

Protocol used for port mapping.

-

int32 container_port

-

Port number in the container.

-

int32 host_port

-

Port number on the host.

-

string host_ip

-

Host IP address.

-
- -- **MountPropagation** - - The API is used to specify enums of mount propagation attributes. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

PROPAGATION_PRIVATE = 0

-

No mount propagation attributes, that is, private in Linux.

-

PROPAGATION_HOST_TO_CONTAINER = 1

-

Mount attribute that can be propagated from the host to the container, that is, rslave in Linux.

-

PROPAGATION_BIDIRECTIONAL = 2

-

Mount attribute that can be propagated between a host and a container, that is, rshared in Linux.

-
- -- **Mount** - - The API is used to mount a volume on the host to a container. \(Only files and folders are supported.\) - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string container_path

-

Path in the container.

-

string host_path

-

Path on the host.

-

bool readonly

-

Whether the configuration is read-only in the container.

-

Default value: false

-

bool selinux_relabel

-

Whether to set the SELinux label. This parameter does not take effect now.

-

MountPropagation propagation

-

Mount propagation attribute.

-

The value can be 0, 1, or 2, corresponding to the private, rslave, and rshared propagation attributes respectively.

-

Default value: 0

-
- - -- **NamespaceOption** - - - - - - - - - - - - - - - -

Parameter

-

Description

-

bool host_network

-

Whether to use host network namespaces.

-

bool host_pid

-

Whether to use host PID namespaces.

-

bool host_ipc

-

Whether to use host IPC namespaces.

-
- -- **Capability** - - This API is used to specify the capabilities to be added and deleted. - - - - - - - - - - - - -

Parameter

-

Description

-

repeated string add_capabilities

-

Capabilities to be added.

-

repeated string drop_capabilities

-

Capabilities to be deleted.

-
- - -- **Int64Value** - - The API is used to encapsulate data of the signed 64-bit integer type. - - - - - - - - - -

Parameter

-

Description

-

int64 value

-

Actual value of the signed 64-bit integer type.

-
- -- **UInt64Value** - - The API is used to encapsulate data of the unsigned 64-bit integer type. - - - - - - - - - -

Parameter

-

Description

-

uint64 value

-

Actual value of the unsigned 64-bit integer type.

-
- -- **LinuxSandboxSecurityContext** - - The API is used to configure the Linux security options of a sandbox. - - Note that these security options are not applied to containers in the sandbox, and may not be applied to the sandbox without any running process. - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

NamespaceOption namespace_options

-

Sandbox namespace options.

-

SELinuxOption selinux_options

-

SELinux options. This parameter does not take effect now.

-

Int64Value run_as_user

-

Process UID in the sandbox.

-

bool readonly_rootfs

-

Whether the root file system of the sandbox is read-only.

-

repeated int64 supplemental_groups

-

Information of the user group of the init process in the sandbox (except the primary GID).

-

bool privileged

-

Whether the sandbox is a privileged container.

-

string seccomp_profile_path

-

Path of the seccomp configuration file. Valid values are as follows:

-

// unconfined: Seccomp is not configured.

-

// localhost/ Full path of the configuration file: configuration file path installed in the system.

-

// Full path of the configuration file: full path of the configuration file.

-

// unconfined is the default value.

-
- -- **LinuxPodSandboxConfig** - - The API is used to configure information related to the Linux host and containers. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string cgroup_parent

-

Parent path of the cgroup of the sandbox. The runtime can use the cgroupfs or systemd syntax based on site requirements. This parameter does not take effect now.

-

LinuxSandboxSecurityContext security_context

-

Security attribute of the sandbox.

-

map<string, string> sysctls

-

Linux sysctls configuration of the sandbox.

-
- -- **PodSandboxMetadata** - - Sandbox metadata contains all information that constructs a sandbox name. It is recommended that the metadata be displayed on the user interface during container running to improve user experience. For example, a unique sandbox name can be generated based on the metadata during running. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string name

-

Sandbox name.

-

string uid

-

Sandbox UID.

-

string namespace

-

Sandbox namespace.

-

uint32 attempt

-

Number of attempts to create a sandbox.

-

Default value: 0

-
- -- **PodSandboxConfig** - - This API is used to specify all mandatory and optional configurations for creating a sandbox. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

PodSandboxMetadata metadata

-

Sandbox metadata, which uniquely identifies a sandbox. The runtime must use the information to ensure that operations are correctly performed, and to improve user experience, for example, construct a readable sandbox name.

-

string hostname

-

Host name of the sandbox.

-

string log_directory

-

Folder for storing container log files in the sandbox.

-

DNSConfig dns_config

-

Sandbox DNS configuration.

-

repeated PortMapping port_mappings

-

Sandbox port mapping.

-

map<string, string> labels

-

Key-value pair that can be used to identify a sandbox or a series of sandboxes.

-

map<string, string> annotations

-

Key-value pair that stores any information, whose values cannot be changed and can be queried by using the PodSandboxStatus API.

-

LinuxPodSandboxConfig linux

-

Options related to the Linux host.

-
- -- **PodSandboxNetworkStatus** - - The API is used to describe the network status of a sandbox. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string ip

-

IP address of the sandbox.

-

string name

-

Network interface name in the sandbox.

-

string network

-

Name of the additional network.

-
- -- **Namespace** - - The API is used to set namespace options. - - - - - - - - - -

Parameter

-

Description

-

NamespaceOption options

-

Linux namespace options.

-
- -- **LinuxPodSandboxStatus** - - The API is used to describe the status of a Linux sandbox. - - - - - - - - - -

Parameter

-

Description

-

Namespace namespaces

-

Sandbox namespace.

-
- -- **PodSandboxState** - - The API is used to specify enum data of the sandbox status values. - - - - - - - - - - - - -

Parameter

-

Description

-

SANDBOX_READY = 0

-

The sandbox is ready.

-

SANDBOX_NOTREADY = 1

-

The sandbox is not ready.

-
- -- **PodSandboxStatus** - - The API is used to describe the PodSandbox status. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Sandbox ID.

-

PodSandboxMetadata metadata

-

Sandbox metadata.

-

PodSandboxState state

-

Sandbox status value.

-

int64 created_at

-

Sandbox creation timestamp (unit: ns).

-

repeated PodSandboxNetworkStatus networks

-

Multi-plane network status of the sandbox.

-

LinuxPodSandboxStatus linux

-

Sandbox status complying with the Linux specifications.

-

map<string, string> labels

-

Key-value pair that can be used to identify a sandbox or a series of sandboxes.

-

map<string, string> annotations

-

Key-value pair that stores any information, whose values cannot be changed by the runtime.

-
- -- **PodSandboxStateValue** - - The API is used to encapsulate [PodSandboxState](#en-us_topic_0182207110_li1818214574195). - - - - - - - - - -

Parameter

-

Description

-

PodSandboxState state

-

Sandbox status value.

-
- -- **PodSandboxFilter** - - The API is used to add filter criteria for the sandbox list. The intersection of multiple filter criteria is displayed. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Sandbox ID.

-

PodSandboxStateValue state

-

Sandbox status.

-

map<string, string> label_selector

-

Sandbox label, which does not support regular expressions and must be fully matched.

-
- -- **PodSandbox** - - This API is used to provide a minimum description of a sandbox. - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Sandbox ID.

-

PodSandboxMetadata metadata

-

Sandbox metadata.

-

PodSandboxState state

-

Sandbox status value.

-

int64 created_at

-

Sandbox creation timestamp (unit: ns).

-

map<string, string> labels

-

Key-value pair that can be used to identify a sandbox or a series of sandboxes.

-

map<string, string> annotations

-

Key-value pair that stores any information, whose values cannot be changed by the runtime.

-
- -- **KeyValue** - - The API is used to encapsulate key-value pairs. - - - - - - - - - - - - -

Parameter

-

Description

-

string key

-

Key

-

string value

-

Value

-
- -- **SELinuxOption** - - The API is used to specify the SELinux label of a container. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string user

-

User

-

string role

-

Role

-

string type

-

Type

-

string level

-

Level

-
- -- **ContainerMetadata** - - Container metadata contains all information that constructs a container name. It is recommended that the metadata be displayed on the user interface during container running to improve user experience. For example, a unique container name can be generated based on the metadata during running. - - - - - - - - - - - - -

Parameter

-

Description

-

string name

-

Container name.

-

uint32 attempt

-

Number of attempts to create a container.

-

Default value: 0

-
- -- **ContainerState** - - The API is used to specify enums of container status values. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

CONTAINER_CREATED = 0

-

The container is created.

-

CONTAINER_RUNNING = 1

-

The container is running.

-

CONTAINER_EXITED = 2

-

The container exits.

-

CONTAINER_UNKNOWN = 3

-

Unknown container status.

-
- -- **ContainerStateValue** - - The API is used to encapsulate the data structure of [ContainerState](#en-us_topic_0182207110_li65182518309). - - - - - - - - - -

Parameter

-

Description

-

ContainerState state

-

Container status value.

-
- -- **ContainerFilter** - - The API is used to add filter criteria for the container list. The intersection of multiple filter criteria is displayed. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Container ID.

-

PodSandboxStateValue state

-

Container status.

-

string pod_sandbox_id

-

Sandbox ID.

-

map<string, string> label_selector

-

Container label, which does not support regular expressions and must be fully matched.

-
- -- **LinuxContainerSecurityContext** - - The API is used to specify container security configurations. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Capability capabilities

-

Added or removed capabilities.

-

bool privileged

-

Whether the container is in privileged mode. Default value: false

-

NamespaceOption namespace_options

-

Container namespace options.

-

SELinuxOption selinux_options

-

SELinux context, which is optional. This parameter does not take effect now.

-

Int64Value run_as_user

-

UID for running container processes. Only run_as_user or run_as_username can be specified at a time. run_as_username takes effect preferentially.

-

string run_as_username

-

Username for running container processes. If specified, the user must exist in /etc/passwd in the container image and be parsed by the runtime. Otherwise, an error must occur during running.

-

bool readonly_rootfs

-

Whether the root file system in a container is read-only. The default value is configured in config.json.

-

repeated int64 supplemental_groups

-

List of user groups of the init process running in the container (except the primary GID).

-

string apparmor_profile

-

AppArmor configuration file of the container. This parameter does not take effect now.

-

string seccomp_profile_path

-

Path of the seccomp configuration file of the container.

-

bool no_new_privs

-

Whether to set the no_new_privs flag in the container.

-
- -- **LinuxContainerResources** - - The API is used to specify configurations of Linux container resources. - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

int64 cpu_period

-

CPU CFS period. Default value: 0

-

int64 cpu_quota

-

CPU CFS quota. Default value: 0

-

int64 cpu_shares

-

CPU share (relative weight). Default value: 0

-

int64 memory_limit_in_bytes

-

Memory limit (unit: byte). Default value: 0

-

int64 oom_score_adj

-

OOMScoreAdj that is used to adjust the OOM killer. Default value: 0

-

string cpuset_cpus

-

CPU core used by the container. Default value: null

-

string cpuset_mems

-

Memory nodes used by the container. Default value: null

-
- -- **Image** - - The API is used to describe the basic information about an image. - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Image ID.

-

repeated string repo_tags

-

Image tags (repo_tags).

-

repeated string repo_digests

-

Image digest information.

-

uint64 size

-

Image size.

-

Int64Value uid

-

Default image UID.

-

string username

-

Default image username.

-
- -- **ImageSpec** - - The API is used to represent the internal data structure of an image. Currently, ImageSpec encapsulates only the container image name. - - - - - - - - - -

Parameter

-

Description

-

string image

-

Container image name.

-
- -- **StorageIdentifier** - - The API is used to specify the unique identifier for defining the storage. - - - - - - - - - -

Parameter

-

Description

-

string uuid

-

Device UUID.

-
- -- **FilesystemUsage** - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

int64 timestamp

-

Timestamp when file system information is collected.

-

StorageIdentifier storage_id

-

UUID of the file system that stores images.

-

UInt64Value used_bytes

-

Size of the metadata that stores images.

-

UInt64Value inodes_used

-

Number of inodes of the metadata that stores images.

-
- -- **AuthConfig** - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string username

-

Username used for downloading images.

-

string password

-

Password used for downloading images.

-

string auth

-

Authentication information used for downloading images. The value is encoded by using Base64.

-

string server_address

-

IP address of the server where images are downloaded. This parameter does not take effect now.

-

string identity_token

-

Information about the token used for the registry authentication. This parameter does not take effect now.

-

string registry_token

-

Information about the token used for the interaction with the registry. This parameter does not take effect now.

-
- -- **Container** - - The API is used to describe container information, such as the ID and status. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Container ID.

-

string pod_sandbox_id

-

ID of the sandbox to which the container belongs.

-

ContainerMetadata metadata

-

Container metadata.

-

ImageSpec image

-

Image specifications.

-

string image_ref

-

Image used by the container. This parameter is an image ID for most runtimes.

-

ContainerState state

-

Container status.

-

int64 created_at

-

Container creation timestamp (unit: ns).

-

map<string, string> labels

-

Key-value pair that can be used to identify a container or a series of containers.

-

map<string, string> annotations

-

Key-value pair that stores any information, whose values cannot be changed by the runtime.

-
- -- **ContainerStatus** - - The API is used to describe the container status information. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Container ID.

-

ContainerMetadata metadata

-

Container metadata.

-

ContainerState state

-

Container status.

-

int64 created_at

-

Container creation timestamp (unit: ns).

-

int64 started_at

-

Container start timestamp (unit: ns).

-

int64 finished_at

-

Container exit timestamp (unit: ns).

-

int32 exit_code

-

Container exit code.

-

ImageSpec image

-

Image specifications.

-

string image_ref

-

Image used by the container. This parameter is an image ID for most runtimes.

-

string reason

-

Brief description of the reason why the container is in the current status.

-

string message

-

Information that is easy to read and indicates the reason why the container is in the current status.

-

map<string, string> labels

-

Key-value pair that can be used to identify a container or a series of containers.

-

map<string, string> annotations

-

Key-value pair that stores any information, whose values cannot be changed by the runtime.

-

repeated Mount mounts

-

Information about the container mount point.

-

string log_path

-

Path of the container log file that is in the log_directory folder configured in PodSandboxConfig.

-
- -- **ContainerStatsFilter** - - The API is used to add filter criteria for the container stats list. The intersection of multiple filter criteria is displayed. - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Container ID.

-

string pod_sandbox_id

-

Sandbox ID.

-

map<string, string> label_selector

-

Container label, which does not support regular expressions and must be fully matched.

-
- -- **ContainerStats** - - The API is used to list the resource usage information of a container. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

ContainerAttributes attributes

-

Container information.

-

CpuUsage cpu

-

CPU usage information.

-

MemoryUsage memory

-

Memory usage information.

-

FilesystemUsage writable_layer

-

Information about the writable layer usage.

-
- -- **ContainerAttributes** - - The API is used to list basic container information. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string id

-

Container ID.

-

ContainerMetadata metadata

-

Container metadata.

-

map<string,string> labels

-

Key-value pair that can be used to identify a container or a series of containers.

-

map<string,string> annotations

-

Key-value pair that stores any information, whose values cannot be changed by the runtime.

-
- -- **CpuUsage** - - The API is used to list the CPU usage information of a container. - - - - - - - - - - - - -

Parameter

-

Description

-

int64 timestamp

-

Timestamp.

-

UInt64Value usage_core_nano_seconds

-

CPU usage (unit: ns).

-
- -- **MemoryUsage** - - The API is used to list the memory usage information of a container. - - - - - - - - - - - - -

Parameter

-

Description

-

int64 timestamp

-

Timestamp.

-

UInt64Value working_set_bytes

-

Memory usage.

-
- -- **FilesystemUsage** - - The API is used to list the read/write layer information of a container. - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

int64 timestamp

-

Timestamp.

-

StorageIdentifier storage_id

-

Writable layer directory.

-

UInt64Value used_bytes

-

Number of bytes occupied by images at the writable layer.

-

UInt64Value inodes_used

-

Number of inodes occupied by images at the writable layer.

-
- -- **Device** - - The API is used to specify the host device to be mapped into a container. - - - - - - - - - - - - - -

Parameter

-

Description

-

string container_path

-

Mounting path of a container.

-

string host_path

-

Mounting path on the host.

-

string permissions

-

Cgroup permission of a device. (r indicates that containers can be read from a specified device. w indicates that containers can be written to a specified device. m indicates that containers can create new device files.)

-
- -- **LinuxContainerConfig** - - The API is used to specify Linux configurations. - - - - - - - - - - - -

Parameter

-

Description

-

LinuxContainerResources resources

-

Container resource specifications.

-

LinuxContainerSecurityContext security_context

-

Linux container security configuration.

-
- -- **ContainerConfig** - - The API is used to specify all mandatory and optional fields for creating a container. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

ContainerMetadata metadata

-

Container metadata. The information will uniquely identify a container and should be used at runtime to ensure correct operations. The information can also be used at runtime to optimize the user experience (UX) design, for example, construct a readable name. This parameter is mandatory.

-

ImageSpec image

-

Image used by the container. This parameter is mandatory.

-

repeated string command

-

Command to be executed. Default value: /bin/sh

-

repeated string args

-

Parameters of the command to be executed.

-

string working_dir

-

Current working path of the command.

-

repeated KeyValue envs

-

Environment variables configured in the container.

-

repeated Mount mounts

-

Information about the mount point to be mounted in the container.

-

repeated Device devices

-

Information about the device to be mapped in the container.

-

map<string, string> labels

-

Key-value pair that can be used to index and select a resource.

-

map<string, string> annotations

-

Unstructured key-value mappings that can be used to store and retrieve any metadata.

-

string log_path

-

Relative path to PodSandboxConfig.LogDirectory, which is used to store logs (STDOUT and STDERR) on the container host.

-

bool stdin

-

Whether to open stdin of the container.

-

bool stdin_once

-

Whether to immediately disconnect other data flows connected with stdin when a data flow connected with stdin is disconnected. This parameter does not take effect now.

-

bool tty

-

Whether to use a pseudo terminal to connect to stdio of the container.

-

LinuxContainerConfig linux

-

Container configuration information in the Linux system.

-
- -- **NetworkConfig** - - This API is used to specify runtime network configurations. - - - - - - - - -

Parameter

-

Description

-

string pod_cidr

-

CIDR used by pod IP addresses.

-
- -- **RuntimeConfig** - - This API is used to specify runtime configurations, including the network configuration. - - - - - - - - -

Parameter

-

Description

-

NetworkConfig network_config

-

Runtime network configurations.

-
- -- **RuntimeCondition** - - The API is used to describe runtime status information. - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string type

-

Runtime status type.

-

bool status

-

Runtime status.

-

string reason

-

Brief description of the reason for the runtime status change.

-

string message

-

Message with high readability, which indicates the reason for the runtime status change.

-
- -- **RuntimeStatus** - - The API is used to describe runtime status. - - - - - - - - -

Parameter

-

Description

-

repeated RuntimeCondition conditions

-

List of current runtime status.

-
- - - - -### Runtime Service - -The runtime service provides APIs for operating pods and containers, and APIs for querying the configuration and status information of the runtime service. - - - -#### RunPodSandbox - -#### Prototype - -``` -rpc RunPodSandbox(RunPodSandboxRequest) returns (RunPodSandboxResponse) {} -``` - -#### Description - -This API is used to create and start a PodSandbox. If the PodSandbox is successfully run, the sandbox is in the ready state. - -#### Precautions - -1. The default image for starting a sandbox is **rnd-dockerhub.huawei.com/library/pause-$\{**_machine_**\}:3.0** where **$\{**_machine_**\}** indicates the architecture. On x86\_64, the value of _machine_ is **amd64**. On ARM64, the value of _machine_ is **aarch64**. Currently, only the **amd64** or **aarch64** image can be downloaded from the rnd-dockerhub registry. If the image does not exist on the host, ensure that the host can download the image from the rnd-dockerhub registry. If you want to use another image, refer to **pod-sandbox-image** in the _iSulad Deployment Configuration_. -2. The container name is obtained from fields in [PodSandboxMetadata](#apis.md#en-us_topic_0182207110_li2359918134912) and separated by underscores \(\_\). Therefore, the metadata cannot contain underscores \(\_\). Otherwise, the [ListPodSandbox](#listpodsandbox.md#EN-US_TOPIC_0184808098) API cannot be used for query even when the sandbox is running successfully. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

PodSandboxConfig config

-

Sandbox configuration.

-

string runtime_handler

-

Runtime for the created sandbox. Currently, lcr and kata-runtime are supported.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

string pod_sandbox_id

-

If the operation is successful, the response is returned.

-
- -#### StopPodSandbox - -#### Prototype - -``` -rpc StopPodSandbox(StopPodSandboxRequest) returns (StopPodSandboxResponse) {} -``` - -#### Description - -This API is used to stop PodSandboxes and sandbox containers, and reclaim the network resources \(such as IP addresses\) allocated to a sandbox. If any running container belongs to the sandbox, the container must be forcibly stopped. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

string pod_sandbox_id

-

Sandbox ID.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

None

-

None

-
- -#### RemovePodSandbox - -#### Prototype - -``` -rpc RemovePodSandbox(RemovePodSandboxRequest) returns (RemovePodSandboxResponse) {} -``` - -#### Description - -This API is used to delete a sandbox. If any running container belongs to the sandbox, the container must be forcibly stopped and deleted. If the sandbox has been deleted, no errors will be returned. - -#### Precautions - -1. When a sandbox is deleted, network resources of the sandbox are not deleted. Before deleting a pod, you must call StopPodSandbox to clear network resources. Ensure that StopPodSandbox is called at least once before deleting the sandbox. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

string pod_sandbox_id

-

Sandbox ID.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

None

-

None

-
- -#### PodSandboxStatus - -#### Prototype - -``` -rpc PodSandboxStatus(PodSandboxStatusRequest) returns (PodSandboxStatusResponse) {} -``` - -#### Description - -This API is used to query the sandbox status. If the sandbox does not exist, an error is returned. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

string pod_sandbox_id

-

Sandbox ID

-

bool verbose

-

Whether to display additional information about the sandbox. This parameter does not take effect now.

-
- -#### Return Values - - - - - - - - - - - - -

Return Value

-

Description

-

PodSandboxStatus status

-

Status of the sandbox.

-

map<string, string> info

-

Additional information about the sandbox. The key can be any string, and the value is a JSON character string. The information can be any debugging content. When verbose is set to true, info cannot be empty. This parameter does not take effect now.

-
- -#### ListPodSandbox - -#### Prototype - -``` -rpc ListPodSandbox(ListPodSandboxRequest) returns (ListPodSandboxResponse) {} -``` - -#### Description - -This API is used to return the sandbox information list. Filtering based on criteria is supported. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

PodSandboxFilter filter

-

Filter criteria.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

repeated PodSandbox items

-

Sandbox information list.

-
- -#### CreateContainer - -``` -grpc::Status CreateContainer(grpc::ServerContext *context, const runtime::CreateContainerRequest *request, runtime::CreateContainerResponse *reply) {} -``` - -#### Description - -This API is used to create a container in the PodSandbox. - -#### Precautions - -- **sandbox\_config** in** CreateContainerRequest** is the same as the configuration transferred to **RunPodSandboxRequest** to create a PodSandbox. It is transferred again for reference only. PodSandboxConfig must remain unchanged throughout the lifecycle of a pod. -- The container name is obtained from fields in [ContainerMetadata](#apis.md#en-us_topic_0182207110_li17135914132319) and separated by underscores \(\_\). Therefore, the metadata cannot contain underscores \(\_\). Otherwise, the [ListContainers](#listcontainers.md#EN-US_TOPIC_0184808103) API cannot be used for query even when the sandbox is running successfully. -- **CreateContainerRequest** does not contain the **runtime\_handler** field. The runtime type of the container is the same as that of the corresponding sandbox. - -#### Parameters - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string pod_sandbox_id

-

ID of the PodSandbox where a container is to be created.

-

ContainerConfig config

-

Container configuration information.

-

PodSandboxConfig sandbox_config

-

PodSandbox configuration information.

-
- -#### Supplement - -Unstructured key-value mappings that can be used to store and retrieve any metadata. The field can be used to transfer parameters for the fields for which the CRI does not provide specific parameters. - -- Customize the field: - - - - - - - - - -

Custom key:value

-

Description

-

cgroup.pids.max:int64_t

-

Used to limit the number of processes or threads in a container. (Set the parameter to -1 for unlimited number.)

-
- - -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

string container_id

-

ID of the created container.

-
- -#### StartContainer - -#### Prototype - -``` -rpc StartContainer(StartContainerRequest) returns (StartContainerResponse) {} -``` - -#### Description - -This API is used to start a container. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

None

-

None

-
- -#### StopContainer - -#### Prototype - -``` -rpc StopContainer(StopContainerRequest) returns (StopContainerResponse) {} -``` - -#### Description - -This API is used to stop a running container. You can set a graceful timeout time. If the container has been stopped, no errors will be returned. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

int64 timeout

-

Waiting time before a container is forcibly stopped. The default value is 0, indicating forcible stop.

-
- -#### Return Values - -None - -#### RemoveContainer - -#### Prototype - -``` -rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse) {} -``` - -#### Description - -This API is used to delete a container. If the container is running, it must be forcibly stopped. If the container has been deleted, no errors will be returned. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-
- -#### Return Values - -None - -#### ListContainers - -#### Prototype - -``` -rpc ListContainers(ListContainersRequest) returns (ListContainersResponse) {} -``` - -#### Description - -This API is used to return the container information list. Filtering based on criteria is supported. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

ContainerFilter filter

-

Filter criteria.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

repeated Container containers

-

Container information list.

-
- -#### ContainerStatus - -#### Prototype - -``` -rpc ContainerStatus(ContainerStatusRequest) returns (ContainerStatusResponse) {} -``` - -#### Description - -This API is used to return the container status information. If the container does not exist, an error will be returned. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

bool verbose

-

Whether to display additional information about the container. This parameter does not take effect now.

-
- -#### Return Values - - - - - - - - - - - - -

Return Value

-

Description

-

ContainerStatus status

-

Container status information.

-

map<string, string> info

-

Additional information about the container. The key can be any string, and the value is a JSON character string. The information can be any debugging content. When verbose is set to true, info cannot be empty. This parameter does not take effect now.

-
- -#### UpdateContainerResources - -#### Prototype - -``` -rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {} -``` - -#### Description - -This API is used to update container resource configurations. - -#### Precautions - -- This API cannot be used to update the pod resource configurations. -- The value of **oom\_score\_adj** of any container cannot be updated. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

LinuxContainerResources linux

-

Linux resource configuration information.

-
- -#### Return Values - -None - -#### ExecSync - -#### Prototype - -``` -rpc ExecSync(ExecSyncRequest) returns (ExecSyncResponse) {} -``` - -#### Description - -The API is used to run a command in containers in synchronization mode through the gRPC communication method. - -#### Precautions - -The interaction between the terminal and the containers must be disabled when a single command is executed. - -#### Parameters - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

repeated string cmd

-

Command to be executed.

-

int64 timeout

-

Timeout period for stopping the command (unit: second). The default value is 0, indicating that there is no timeout limit. This parameter does not take effect now.

-
- -#### Return Values - - - - - - - - - - - - - - - -

Return Value

-

Description

-

bytes stdout

-

Standard output of the capture command.

-

bytes stderr

-

Standard error output of the capture command.

-

int32 exit_code

-

Exit code, which represents the completion of command execution. The default value is 0, indicating that the command is executed successfully.

-
- -#### Exec - -#### Prototype - -``` -rpc Exec(ExecRequest) returns (ExecResponse) {} -``` - -#### Description - -This API is used to run commands in a container through the gRPC communication method, that is, obtain URLs from the CRI server, and then use the obtained URLs to establish a long connection to the WebSocket server, implementing the interaction with the container. - -#### Precautions - -The interaction between the terminal and the container can be enabled when a single command is executed. One of **stdin**, **stdout**, and **stderr** must be true. If **tty** is true, **stderr** must be false. Multiplexing is not supported. In this case, the output of **stdout** and **stderr** will be combined into a single stream. - -#### Parameters - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

repeated string cmd

-

Command to be executed.

-

bool tty

-

Whether to run the command in a TTY.

-

bool stdin

-

Whether to generate the standard input stream.

-

bool stdout

-

Whether to generate the standard output stream.

-

bool stderr

-

Whether to generate the standard error output stream.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

string url

-

Fully qualified URL of the exec streaming server.

-
- -#### Attach - -#### Prototype - -``` -rpc Attach(AttachRequest) returns (AttachResponse) {} -``` - -#### Description - -This API is used to take over the init process of a container through the gRPC communication method, that is, obtain URLs from the CRI server, and then use the obtained URLs to establish a long connection to the WebSocket server, implementing the interaction with the container. Only containers whose runtime is of the LCR type are supported. - -#### Parameters - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-

bool tty

-

Whether to run the command in a TTY.

-

bool stdin

-

Whether to generate the standard input stream.

-

bool stdout

-

Whether to generate the standard output stream.

-

bool stderr

-

Whether to generate the standard error output stream.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

string url

-

Fully qualified URL of the attach streaming server.

-
- -#### ContainerStats - -#### Prototype - -``` -rpc ContainerStats(ContainerStatsRequest) returns (ContainerStatsResponse) {} -``` - -#### Description - -This API is used to return information about resources occupied by a container. Only containers whose runtime is of the LCR type are supported. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

string container_id

-

Container ID.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

ContainerStats stats

-

Container resource usage information. Note: Disk and inode statistics are available only for containers started from OCI images.

-
- -#### ListContainerStats - -#### Prototype - -``` -rpc ListContainerStats(ListContainerStatsRequest) returns (ListContainerStatsResponse) {} -``` - -#### Description - -This API is used to return the information about resources occupied by multiple containers. Filtering based on criteria is supported. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

ContainerStatsFilter filter

-

Filter criteria.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

repeated ContainerStats stats

-

Container resource usage information list. Note: Disk and inode statistics are available only for containers started from OCI images.

-
- -#### UpdateRuntimeConfig - -#### Prototype - -``` -rpc UpdateRuntimeConfig(UpdateRuntimeConfigRequest) returns (UpdateRuntimeConfigResponse); -``` - -#### Description - -This API is used as a standard CRI to update the pod CIDR of the network plug-in. Currently, the CNI network plug-in does not need to update the pod CIDR. Therefore, this API records only access logs. - -#### Precautions - -API operations will not modify the system management information, but only record a log. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

RuntimeConfig runtime_config

-

Information to be configured for the runtime.

-
- -#### Return Values - -None - -#### Status - -#### Prototype - -``` -rpc Status(StatusRequest) returns (StatusResponse) {}; -``` - -#### Description - -This API is used to obtain the network status of the runtime and pod. Obtaining the network status will trigger the update of network configuration. Only containers whose runtime is of the LCR type are supported. - -#### Precautions - -If the network configuration fails to be updated, the original configuration is not affected. The original configuration is overwritten only when the update is successful. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

bool verbose

-

Whether to display additional runtime information. This parameter does not take effect now.

-
- -#### Return Values - - - - - - - - - - - - -

Return Value

-

Description

-

RuntimeStatus status

-

Runtime status.

-

map<string, string> info

-

Additional information about the runtime. The key of info can be any value. The value must be in JSON format and can contain any debugging information. When verbose is set to true, info cannot be empty.

-
- -### Image Service - -The service provides the gRPC API for pulling, viewing, and removing images from the registry. - - -#### ListImages - -#### Prototype - -``` -rpc ListImages(ListImagesRequest) returns (ListImagesResponse) {} -``` - -#### Description - -This API is used to list existing image information. - -#### Precautions - -This is a unified API. You can run the **cri images** command to query embedded images. However, embedded images are not standard OCI images. Therefore, query results have the following restrictions: - -- An embedded image does not have an image ID. Therefore, the value of **image ID** is the config digest of the image. -- An embedded image has only config digest, and it does not comply with the OCI image specifications. Therefore, the value of **digest** cannot be displayed. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

ImageSpec filter

-

Name of the image to be filtered.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

repeated Image images

-

Image information list.

-
- -#### ImageStatus - -#### Prototype - -``` -rpc ImageStatus(ImageStatusRequest) returns (ImageStatusResponse) {} -``` - -#### Description - -The API is used to query the information about a specified image. - -#### Precautions - -1. If the image to be queried does not exist, **ImageStatusResponse** is returned and **Image** is set to **nil** in the return value. -2. This is a unified API. Since embedded images do not comply with the OCI image specifications and do not contain required fields, the images cannot be queried by using this API. - -#### Parameters - - - - - - - - - - - - -

Parameter

-

Description

-

ImageSpec image

-

Image name.

-

bool verbose

-

Whether to query additional information. This parameter does not take effect now. No additional information is returned.

-
- -#### Return Values - - - - - - - - - - - - -

Return Value

-

Description

-

Image image

-

Image information.

-

map<string, string> info

-

Additional image information. This parameter does not take effect now. No additional information is returned.

-
- -#### PullImage - -#### Prototype - -``` - rpc PullImage(PullImageRequest) returns (PullImageResponse) {} -``` - -#### Description - -This API is used to download images. - -#### Precautions - -Currently, you can download public images, and use the username, password, and auth information to download private images. The **server\_address**, **identity\_token**, and **registry\_token** fields in **authconfig** cannot be configured. - -#### Parameters - - - - - - - - - - - - - - - -

Parameter

-

Description

-

ImageSpec image

-

Name of the image to be downloaded.

-

AuthConfig auth

-

Verification information for downloading a private image.

-

PodSandboxConfig sandbox_config

-

Whether to download an image in the pod context. This parameter does not take effect now.

-
- -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

string image_ref

-

Information about the downloaded image.

-
- -#### RemoveImage - -#### Prototype - -``` -rpc RemoveImage(RemoveImageRequest) returns (RemoveImageResponse) {} -``` - -#### Description - -This API is used to delete specified images. - -#### Precautions - -This is a unified API. Since embedded images do not comply with the OCI image specifications and do not contain required fields, you cannot delete embedded images by using this API and the image ID. - -#### Parameters - - - - - - - - - -

Parameter

-

Description

-

ImageSpec image

-

Name or ID of the image to be deleted.

-
- -#### Return Values - -None - -#### ImageFsInfo - -#### Prototype - -``` -rpc ImageFsInfo(ImageFsInfoRequest) returns (ImageFsInfoResponse) {} -``` - -#### Description - -This API is used to query the information about the file system that stores images. - -#### Precautions - -Queried results are the file system information in the image metadata. - -#### Parameters - -None - -#### Return Values - - - - - - - - - -

Return Value

-

Description

-

repeated FilesystemUsage image_filesystems

-

Information about the file system that stores images.

-
- -### Constraints - -1. If **log\_directory** is configured in the **PodSandboxConfig** parameter when a sandbox is created, **log\_path** must be specified in **ContainerConfig** when all containers that belong to the sandbox are created. Otherwise, the containers may not be started or deleted by using the CRI. - - The actual value of **LOGPATH** of containers is **log\_directory/log\_path**. If **log\_path** is not set, the final value of **LOGPATH** is changed to **log\_directory**. - - - If the path does not exist, iSulad will create a soft link pointing to the actual path of container logs when starting a container. Then **log\_directory** becomes a soft link. There are two cases: - 1. In the first case, if **log\_path** is not configured for other containers in the sandbox, **log\_directory** will be deleted and point to **log\_path** of the newly started container. As a result, logs of the first started container point to logs of the later started container. - 2. In the second case, if **log\_path** is configured for other containers in the sandbox, the value of **LOGPATH** of the container is **log\_directory/log\_path**. Because **log\_directory** is a soft link, the creation fails when **log\_directory/log\_path** is used as the soft link to point to the actual path of container logs. - - - If the path exists, iSulad will attempt to delete the path \(non-recursive\) when starting a container. If the path is a folder path containing content, the deletion fails. As a result, the soft link fails to be created, the container fails to be started, and the same error occurs when the container is going to be deleted. - -2. If **log\_directory** is configured in the **PodSandboxConfig** parameter when a sandbox is created, and **log\_path** is specified in **ContainerConfig** when a container is created, the final value of **LOGPATH** is **log\_directory/log\_path**. 
iSulad does not recursively create **LOGPATH**, therefore, you must ensure that **dirname\(LOGPATH\)** exists, that is, the upper-level path of the final log file path exists. -3. If **log\_directory** is configured in the **PodSandboxConfig** parameter when a sandbox is created, and the same **log\_path** is specified in **ContainerConfig** when multiple containers are created, or if containers in different sandboxes point to the same **LOGPATH**, the latest container log path will overwrite the previous path after the containers are started successfully. -4. If the image content in the remote registry changes and the original image is stored in the local host, the name and tag of the original image are changed to **none** when you call the CRI Pull image API to download the image again. - - An example is as follows: - - Locally stored images: - - ``` - IMAGE TAG IMAGE ID SIZE - rnd-dockerhub.huawei.com/pproxyisulad/test latest 99e59f495ffaa 753kB - ``` - - After the **rnd-dockerhub.huawei.com/pproxyisulad/test:latest** image in the remote registry is updated and downloaded again: - - ``` - IMAGE TAG IMAGE ID SIZE - 99e59f495ffaa 753kB - rnd-dockerhub.huawei.com/pproxyisulad/test latest d8233ab899d41 1.42MB - ``` - - Run the **isula images** command. The value of **REF** is displayed as **-**. - - ``` - REF IMAGE ID CREATED SIZE - rnd-dockerhub.huawei.com/pproxyisulad/test:latest d8233ab899d41 2019-02-14 19:19:37 1.42MB - - 99e59f495ffaa 2016-05-04 02:26:41 753kB - ``` - - diff --git a/docs/en/docs/Container/docker-container.md b/docs/en/docs/Container/docker-container.md deleted file mode 100644 index 0c22297da0329b232603efb8bef7f8c1b0d385ae..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/docker-container.md +++ /dev/null @@ -1,4 +0,0 @@ -# Docker Container - -Docker is an open-source Linux container engine that enables quick application packaging, deployment, and delivery. 
The original meaning of Docker is dork worker, whose job is to pack the goods to the containers, and move containers, and load containers. Similarly, the job of Docker in Linux is to pack applications to containers, and deploy and run applications on various platforms using containers. Docker uses Linux Container technology to turn applications into standardized, portable, and self-managed components, enabling the "build once" and "run everywhere" features of applications. Features of Docker technology include: quick application release, easy application deployment and management, and high application density. - diff --git a/docs/en/docs/Container/dynamically-loading-the-kernel-module.md b/docs/en/docs/Container/dynamically-loading-the-kernel-module.md deleted file mode 100644 index 7c1458075e1501d1422440e86b86ad3f3512467d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/dynamically-loading-the-kernel-module.md +++ /dev/null @@ -1,56 +0,0 @@ -Dynamically Loading the Kernel Module - -- [Dynamically Loading the Kernel Module](#dynamically-loading-the-kernel-module) - - -## Function Description - -Services in a container may depend on some kernel modules. You can set environment variables to dynamically load the kernel modules required by services in the container to the host before the system container starts. This feature must be used together with isulad-hooks. For details, see [Dynamically Managing Container Resources \(syscontainer-tools\)](./dynamically-managing-container-resources-(syscontainer-tools).md). - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

-e KERNEL_MODULES=module_name1,module_name

-
  • Variable of the string type.
  • This parameter can be set to multiple modules. Use commas (,) to separate module names.
-
- -## Constraints - -- If loaded kernel modules are not verified or conflict with existing modules on the host, an unpredictable error may occur on the host. Therefore, exercise caution when loading kernel modules. -- Dynamic kernel module loading transfers kernel modules to be loaded to containers. This function is implemented by capturing environment variables for container startup using isulad-tools. Therefore, this function relies on the proper installation and deployment of isulad-tools. -- Loaded kernel modules need to be manually deleted. - -## Example - -When starting a system container, specify the **-e KERNEL\_MODULES** parameter. After the system container is started, the ip\_vs module is successfully loaded to the kernel. - -``` -[root@localhost ~]# lsmod | grep ip_vs -[root@localhost ~]# isula run -tid -e KERNEL_MODULES=ip_vs,ip_vs_wrr --hook-spec /etc/isulad-tools/hookspec.json --system-container --external-rootfs /root/myrootfs none init -ae18c4281d5755a1e153a7bff6b3b4881f36c8e528b9baba8a3278416a5d0980 -[root@localhost ~]# lsmod | grep ip_vs -ip_vs_wrr 16384 0 -ip_vs 176128 2 ip_vs_wrr -nf_conntrack 172032 7 xt_conntrack,nf_nat,nf_nat_ipv6,ipt_MASQUERADE,nf_nat_ipv4,nf_conntrack_netlink,ip_vs -nf_defrag_ipv6 20480 2 nf_conntrack,ip_vs -libcrc32c 16384 3 nf_conntrack,nf_nat,ip_vs -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- isulad-tools must be installed on the host. ->- **--hooks-spec** must be set to **isulad hooks**. 
- diff --git a/docs/en/docs/Container/dynamically-managing-container-resources-(syscontainer-tools).md b/docs/en/docs/Container/dynamically-managing-container-resources-(syscontainer-tools).md deleted file mode 100644 index 1c0aa377631d6c50d51d63b5b89fba237f1f658f..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/dynamically-managing-container-resources-(syscontainer-tools).md +++ /dev/null @@ -1,495 +0,0 @@ -# Dynamically Managing Container Resources \(syscontainer-tools\) - -- [Dynamically Managing Container Resources \(syscontainer-tools\)](#dynamically-managing-container-resources-(syscontainer-tools)) - - [Device Management](#device-management) - - [NIC Management](#nic-management) - - [Route Management](#route-management) - - [Volume Mounting Management](#volume-mounting-management) - - -Resources in common containers cannot be managed. For example, a block device cannot be added to a common container, and a physical or virtual NIC cannot be inserted to a common container. In the system container scenario, the syscontainer-tools can be used to dynamically mount or unmount block devices, network devices, routes, and volumes for containers. - -To use this function, you need to install the syscontainer-tools first. - -``` -[root@localhost ~]# yum install syscontainer-tools -``` - - -## Device Management - -### Function Description - -isulad-tools allows you to add block devices \(such as disks and logical volume managers\) or character devices \(such as GPUs, binners, and FUSEs\) on the host to a container. The devices can be used in the container. For example, you can run the **fdisk** command to format the disk and write data to the file system. If the devices are not required, isulad-tools allows you to delete them from the container and return them to the host. - -### Command Format - -``` -isulad-tools [COMMADN][OPTIONS] [ARG...] -``` - -In the preceding format: - -**COMMAND**: command related to device management. 
- -**OPTIONS**: option supported by the device management command. - -**container\_id**: container ID. - -**ARG**: parameter corresponding to the command. - -### Parameter Description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Function Description

-

Option Description

-

Parameter Description

-

add-device

-

Adds block devices or character devices on the host to a container.

-

Supported options are as follows:

-
  • --blkio-weight-device: sets the I/O weight (relative weight, ranging from 10 to 100) of a block device.
  • --device-read-bps: sets the read rate limit for the block device (byte/s).
  • --device-read-iops: sets the read rate limit for the block device (I/O/s).
  • --device-write-bps: sets the write rate limit for the block device (byte/s).
  • --device-write-iops: sets the write rate limit for the block device (I/O/s).
  • --follow-partition: If a block device is a basic block device (primary SCSI block disk), set this parameter to add all partitions of the primary disk.
  • --force: If any block device or character device already exists in the container, use this parameter to overwrite the old block device or character device files.
  • --update-config-only: updates configuration files only and does not add disks.
-

Parameter format: hostdevice[:containerdevice][:permission] [hostdevice[:containerdevice][:permission]]

-

In the preceding format:

-

hostdevice: path on the host for storing a device.

-

containerdevice: path on the container for storing a device.

-

permission: operation permission on a device within the container.

-

remove-device

-

Deletes block devices or character devices from a container and restores them to the host.

-

Supported options are as follows:

-

--follow-partition: If a block device is a basic block device (primary SCSI block disk), set this parameter to delete all partitions of the primary disk in the container, and restore them to the host.

-

Parameter format: hostdevice[:containerdevice] [hostdevice[:containerdevice]]

-

In the preceding format:

-

hostdevice: path on the host for storing a device.

-

containerdevice: path on the container for storing a device.

-

list-device

-

Lists all block devices or character devices in a container.

-

Supported options are as follows:

-
  • --pretty: outputs data in JSON format.
  • --sub-partition: For a primary disk, add this flag to display the primary disk and its sub-partitions.
-

None

-

update-device

-

Updates the disk QoS.

-

Supported options are as follows:

-
  • --device-read-bps: sets the read rate limit for the block device (byte/s). You are advised to set this parameter to a value greater than or equal to 1024.
  • --device-read-iops: sets the read rate limit for the block device (I/O/s).
  • --device-write-bps: sets the write rate limit for the block device (byte/s). You are advised to set this parameter to a value greater than or equal to 1024.
  • --device-write-iops: sets the write rate limit for the block device (I/O/s).
-

None

-
- -### Constraints - -- You can add or delete devices when container instances are not running. After the operation is complete, you can start the container to view the device status. You can also dynamically add a device when the container is running. -- Do not concurrently run the **fdisk** command to format disks in a container and on the host. Otherwise, the container disk usage will be affected. -- When you run the **add-device** command to add a disk to a specific directory of a container, if the parent directory in the container is a multi-level directory \(for example, **/dev/a/b/c/d/e**\) and the directory level does not exist, isulad-tools will automatically create the corresponding directory in the container. When the disk is deleted, the created parent directory is not deleted. If you run the **add-device** command to add a device to this parent directory again, a message is displayed, indicating that a device already exists and cannot be added. -- When you run the** add-device** command to add a disk or update disk parameters, you need to configure the disk QoS. Do not set the write or read rate limit for the block device \(I/O/s or byte/s\) to a small value. If the value is too small, the disk may be unreadable \(the actual reason is the speed is too slow\), affecting service functions. -- When you run the **--blkio-weight-device** command to limit the weight of a specified block device, if the block device supports only the BFQ mode, an error may be reported, prompting you to check whether the current OS environment supports setting the weight of the BFQ block device. - -### Example - -- Start a system container, and set **hook spec** to the isulad hook execution script. - - ``` - [root@localhost ~]# isula run -tid --hook-spec /etc/isulad-tools/hookspec.json --system-container --external-rootfs /root/root-fs none init - eed1096c8c7a0eca6d92b1b3bc3dd59a2a2adf4ce44f18f5372408ced88f8350 - ``` - - -- Add a block device to a container. 
- - ``` - [root@localhost ~]# isulad-tools add-device ee /dev/sdb:/dev/sdb123 - Add device (/dev/sdb) to container(ee,/dev/sdb123) done. - [root@localhost ~]# isula exec ee fdisk -l /dev/sdb123 - Disk /dev/sdb123: 50 GiB, 53687091200 bytes, 104857600 sectors - Units: sectors of 1 * 512 = 512 bytes - Sector size (logical/physical): 512 bytes / 512 bytes - I/O size (minimum/optimal): 512 bytes / 512 bytes - Disklabel type: dos - Disk identifier: 0xda58a448 - - Device Boot Start End Sectors Size Id Type - /dev/sdb123p1 2048 104857599 104855552 50G 5 Extended - /dev/sdb123p5 4096 104857599 104853504 50G 83 Linux - ``` - -- Update the device information. - - ``` - [root@localhost ~]# isulad-tools update-device --device-read-bps /dev/sdb:10m ee - Update read bps for device (/dev/sdb,10485760) done. - ``` - -- Delete a device. - - ``` - [root@localhost ~]# isulad-tools remove-device ee /dev/sdb:/dev/sdb123 - Remove device (/dev/sdb) from container(ee,/dev/sdb123) done. - Remove read bps for device (/dev/sdb) done. - ``` - - -## NIC Management - -### Function Description - -isulad-tools allows you to insert physical or virtual NICs on the host to a container. If the NICs are not required, isulad-tools allows you to delete them from the container and return them to the host. In addition, the NIC configurations can be dynamically modified. To insert a physical NIC, add the NIC on the host to the container. To insert a virtual NIC, create a veth pair and insert its one end to the container. - -### Command Format - -``` -isulad-tools [COMMADN][OPTIONS] -``` - -In the preceding format: - -**COMMAND**: command related to NIC management. - -**OPTIONS**: option supported by the NIC management command. - -**container\_id**: container ID. - -### Parameter Description - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Function Description

-

Option Description

-

add-nic

-

Creates an NIC for a container.

-

Supported options are as follows:

-
  • --type: specifies the NIC type. Only eth and veth are supported.
  • --name: specifies the NIC name. The format is [host:]container. If host is not specified, a random value is used.
  • --ip: specifies the NIC IP address.
  • --mac: specifies the NIC MAC address.
  • --bridge: specifies the network bridge bound to the NIC.
  • --mtu: specifies the MTU value of the NIC. The default value is 1500.
  • --update-config-only: If this flag is set, only configuration files are updated and NICs are not added.
  • --qlen: specifies the value of QLEN. The default value is 1000.
-

remove-nic

-

Deletes NICs from a container and restores them to the host.

-

Supported options are as follows:

-
  • --type: specifies the NIC type.
  • --name: specifies the name of the NIC. The format is [host:]container.
-

list-nic

-

Lists all NICs in a container.

-

Supported options are as follows:

-
  • --pretty: outputs data in JSON format.
  • --filter: outputs filtered data in the specific format, for example, --filter' {"ip":"192.168.3.4/24", "Mtu":1500}'.
-

update-nic

-

Modifies configuration parameters of a specified NIC in a container.

-

Supported options are as follows:

-
  • --name: specifies the name of the NIC in the container. This parameter is mandatory.
  • --ip: specifies the NIC IP address.
  • --mac: specifies the NIC MAC address.
  • --bridge: specifies the network bridge bound to the NIC.
  • --mtu: specifies the MTU value of the NIC.
  • --update-config-only: If this flag is set, configuration files are updated and NICs are not updated.
  • --qlen: specifies the value of QLEN.
-
- -### Constraints - -- Physical NICs \(eth\) and virtual NICs \(veth\) can be added. -- When adding a NIC, you can also configure the NIC. The configuration parameters include **--ip**, **--mac**, **--bridge**, **--mtu**, **--qlen**. -- A maximum of eight physical NICs can be added to a container. -- If you run the **isulad-tools add-nic** command to add an eth NIC to a container and do not add a hook, you must manually delete the NIC before the container exits. Otherwise, the name of the eth NIC on the host will be changed to the name of that in the container. -- For a physical NIC \(except 1822 VF NIC\), use the original MAC address when running the **add-nic** command. Do not change the MAC address in the container, or when running the **update-nic** command. -- When using the **isulad-tools add-nic** command, set the MTU value. The value range depends on the NIC model. -- When using isulad-tools to add NICs and routes to containers, you are advised to run the **add-nic** command to add NICs and then run the **add-route** command to add routes. When using isulad-tools to delete NICs and routes from a container, you are advised to run the **remove-route** command to delete routes and then run the **remove-nic** command to delete NICs. -- When using isulad-tools to add NICs, add a NIC to only one container. - -### Example - -- Start a system container, and set **hook spec** to the isulad hook execution script. - - ``` - [root@localhost ~]# isula run -tid --hook-spec /etc/isulad-tools/hookspec.json --system-container --external-rootfs /root/root-fs none init - 2aaca5c1af7c872798dac1a468528a2ccbaf20b39b73fc0201636936a3c32aa8 - ``` - - -- Add a virtual NIC to a container. - - ``` - [root@localhost ~]# isulad-tools add-nic --type "veth" --name abc2:bcd2 --ip 172.17.28.5/24 --mac 00:ff:48:13:xx:xx --bridge docker0 2aaca5c1af7c - Add network interface to container 2aaca5c1af7c (bcd2,abc2) done - ``` - -- Add a physical NIC to a container. 
- - ``` - [root@localhost ~]# isulad-tools add-nic --type "eth" --name eth3:eth1 --ip 172.17.28.6/24 --mtu 1300 --qlen 2100 2aaca5c1af7c - Add network interface to container 2aaca5c1af7c (eth3,eth1) done - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >When adding a virtual or physical NIC, ensure that the NIC is in the idle state. Adding a NIC in use will disconnect the system network. - - -## Route Management - -### Function Description - -isulad-tools can be used to dynamically add or delete routing tables for system containers. - -### Command Format - -``` -isulad-tools [COMMADN][OPTIONS] [ARG...] -``` - -In the preceding format: - -**COMMAND**: command related to route management. - -**OPTIONS**: option supported by the route management command. - -**container\_id**: container ID. - -**ARG**: parameter corresponding to the command. - -### API Description - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Function Description

-

Option Description

-

Parameter Description

-

add-route

-

Adds the network routing rules to a container.

-

Supported options are as follows:

-

--update-config-only: If this parameter is configured, configuration files are updated and routing tables are not updated.

-

Parameter format: [{rule1},{rule2}]

-

Example of rule:

-

'[{"dest":"default", "gw":"192.168.10.1"},{"dest":"192.168.0.0/16","dev":"eth0","src":"192.168.1.2"}]'

-
  • dest: target network. If this parameter is left blank, the default gateway is used.
  • src: source IP address of a route.
  • gw: route gateway.
  • dev: network device.
-

remove-route

-

Deletes a route from a container.

-

Supported options are as follows:

-

--update-config-only: If this parameter is configured, only configuration files are updated and routes are not deleted from the container.

-

Parameter format: [{rule1},{rule2}]

-

Example of rule:

-

'[{"dest":"default", "gw":"192.168.10.1"},{"dest":"192.168.0.0/16","dev":"eth0","src":"192.168.1.2"}]'

-
  • dest: target network. If this parameter is left blank, the default gateway is used.
  • src: source IP address of a route.
  • gw: route gateway.
  • dev: network device.
-

list-route

-

Lists all routing rules in a container.

-

Supported options are as follows:

-
  • --pretty: outputs data in JSON format.
  • --filter: outputs filtered data in the specific format, for example, --filter' {"ip":"192.168.3.4/24", "Mtu":1500}'.
-

None

-
- -### Constraints - -- When using isulad-tools to add NICs and routes to containers, you are advised to run the **add-nic** command to add NICs and then run the **add-route** command to add routes. When using isulad-tools to delete NICs and routes from a container, you are advised to run the **remove-route** command to delete routes and then run the **remove-nic** command to delete NICs. -- When adding a routing rule to a container, ensure that the added routing rule does not conflict with existing routing rules in the container. - -### Example - -- Start a system container, and set **hook spec** to the isulad hook execution script. - - ``` - [root@localhost ~]# isula run -tid --hook-spec /etc/isulad-tools/hookspec.json --system-container --external-rootfs /root/root-fs none init - 0d2d68b45aa0c1b8eaf890c06ab2d008eb8c5d91e78b1f8fe4d37b86fd2c190b - ``` - - -- Use isulad-tools to add a physical NIC to the system container. - - ``` - [root@localhost ~]# isulad-tools add-nic --type "eth" --name enp4s0:eth123 --ip 172.17.28.6/24 --mtu 1300 --qlen 2100 0d2d68b45aa0 - Add network interface (enp4s0) to container (0d2d68b45aa0,eth123) done - ``` - - -- isulad-tools adds a routing rule to the system container. Format example: **\[\{"dest":"default", "gw":"192.168.10.1"\},\{"dest":"192.168.0.0/16","dev":"eth0","src":"192.168.1.2"\}\]**. If **dest** is left blank, its value will be **default**. - - ``` - [root@localhost ~]# isulad-tools add-route 0d2d68b45aa0 '[{"dest":"172.17.28.0/32", "gw":"172.17.28.5","dev":"eth123"}]' - Add route to container 0d2d68b45aa0, route: {dest:172.17.28.0/32,src:,gw:172.17.28.5,dev:eth123} done - ``` - -- Check whether a routing rule is added in the container. 
- - ``` - [root@localhost ~]# isula exec -it 0d2d68b45aa0 route - Kernel IP routing table - Destination Gateway Genmask Flags Metric Ref Use Iface - 172.17.28.0 172.17.28.5 255.255.255.255 UGH 0 0 0 eth123 - 172.17.28.0 0.0.0.0 255.255.255.0 U 0 0 0 eth123 - ``` - - -## Volume Mounting Management - -### Function Description - -In a common container, you can set the **--volume** parameter during container creation to mount directories or volumes of the host to the container for resource sharing. However, during container running, you cannot unmount directories or volumes that are mounted to the container, or mount directories or volumes of the host to the container. Only the system container can use the isulad-tools tool to dynamically mount directories or volumes of the host to the container and unmount directories or volumes from the container. - -### Command Format - -``` -isulad-tools [COMMADN][OPTIONS] [ARG...] -``` - -In the preceding format: - -**COMMAND**: command related to route management. - -**OPTIONS**: option supported by the route management command. - -**container\_id**: container ID. - -**ARG**: parameter corresponding to the command. - -### API Description - -**Table 1**    - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Function Description

-

Option Description

-

Parameter Description

-

add-path

-

Adds files or directories on the host to a container.

-

None

-

The parameter format is as follows:

-

hostpath:containerpath:permission [hostpath:containerpath:permission ...]

-

In the preceding format:

-

hostpath: path on the host for storing a volume.

-

containerpath: path on the container for storing a volume.

-

permission: operation permission on a mount path within the container.

-

remove-path

-

Deletes directories or files from the container and restores them to the host.

-

None

-

Parameter format: hostpath:containerpath[hostpath:containerpath ]

-

In the preceding format:

-

hostpath: path on the host for storing a volume.

-

containerpath: path on the container for storing a volume.

-

list-path

-

Lists all path directories in a container.

-

Supported options are as follows:

-

--pretty: outputs data in JSON format.

-

None

-
- -### Constraints - -- When running the **add-path** command, specify an absolute path as the mount path. -- The mount point /.sharedpath is generated on the host after the mount path is specified by running the **add-path** command. -- A maximum of 128 volumes can be added to a container. -- Do not overwrite the root directory \(/\) in a container with the host directory by running the **add-path** command. Otherwise, the function is affected. - -### Example - -- Start a system container, and set **hook spec** to the isulad hook execution script. - - ``` - [root@localhost ~]# isula run -tid --hook-spec /etc/isulad-tools/hookspec.json --system-container --external-rootfs /root/root-fs none init - e45970a522d1ea0e9cfe382c2b868d92e7b6a55be1dd239947dda1ee55f3c7f7 - ``` - - -- Use isulad-tools to mount a directory on the host to a container, implementing resource sharing. - - ``` - [root@localhost ~]# isulad-tools add-path e45970a522d1 /home/test123:/home/test123 - Add path (/home/test123) to container(e45970a522d1,/home/test123) done. - ``` - -- Create a file in the **/home/test123** directory on the host and check whether the file can be accessed in the container. - - ``` - [root@localhost ~]# echo "hello world" > /home/test123/helloworld - [root@localhost ~]# isula exec e45970a522d1 bash - [root@localhost /]# cat /home/test123/helloworld - hello world - ``` - -- Use isulad-tools to delete the mount directory from the container. 
- - ``` - [root@localhost ~]# isulad-tools remove-path e45970a522d1 /home/test123:/home/test123 - Remove path (/home/test123) from container(e45970a522d1,/home/test123) done - [root@localhost ~]# isula exec e45970a522d1 bash - [root@localhost /]# ls /home/test123/helloworld - ls: cannot access '/home/test123/helloworld': No such file or directory - ``` - - diff --git a/docs/en/docs/Container/environment-variable-persisting.md b/docs/en/docs/Container/environment-variable-persisting.md deleted file mode 100644 index f296c5c144245df505a9256b6373ace2a983f82d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/environment-variable-persisting.md +++ /dev/null @@ -1,51 +0,0 @@ -# Environment Variable Persisting - -- [Environment Variable Persisting](#environment-variable-persisting) - - -## Function Description - -In a system container, you can make the **env** variable persistent to the configuration file in the rootfs directory of the container by specifying the **--env-target-file** interface parameter. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--env-target-file

-
  • Variable of the string type.
  • The env persistent file must be in the rootfs directory and must be an absolute path.
-
- -## Constraints - -- If the target file specified by **--env-target-file** exists, the size cannot exceed 10 MB. -- The parameter specified by **--env-target-file** must be an absolute path in the rootfs directory. -- If the value of **--env** conflicts with that of **env** in the target file, the value of **--env** prevails. - -## Example - -Start a system container and specify the **env** environment variable and **--env-target-file** parameter. - -``` -[root@localhost ~]# isula run -tid -e abc=123 --env-target-file /etc/environment --system-container --external-rootfs /root/myrootfs none init -b75df997a64da74518deb9a01d345e8df13eca6bcc36d6fe40c3e90ea1ee088e -[root@localhost ~]# isula exec b7 cat /etc/environment -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -TERM=xterm -abc=123 -``` - -The preceding information indicates that the **env** variable \(**abc=123**\) of the container has been made persistent to the **/etc/environment** configuration file. - diff --git a/docs/en/docs/Container/figures/en-us_image_0183048952.png b/docs/en/docs/Container/figures/en-us_image_0183048952.png deleted file mode 100644 index 970d6bef8b11c3a135a5df4ee3920f7dca647ce5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/en-us_image_0183048952.png and /dev/null differ diff --git a/docs/en/docs/Container/figures/en-us_image_0221924926.png b/docs/en/docs/Container/figures/en-us_image_0221924926.png deleted file mode 100644 index 62ef0decdf6f1e591059904001d712a54f727e68..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/en-us_image_0221924926.png and /dev/null differ diff --git a/docs/en/docs/Container/figures/en-us_image_0221924927.png b/docs/en/docs/Container/figures/en-us_image_0221924927.png deleted file mode 100644 index ad5ed3f7beeb01e6a48707c4806606b41d687e22..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/en-us_image_0221924927.png and 
/dev/null differ diff --git a/docs/en/docs/Container/figures/isula-build_arch.png b/docs/en/docs/Container/figures/isula-build_arch.png deleted file mode 100644 index 911a9ae6f46988586ab49f15de282948f5470c37..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/isula-build_arch.png and /dev/null differ diff --git a/docs/en/docs/Container/figures/relationship-between-the-secure-container-and-peripheral-components.png b/docs/en/docs/Container/figures/relationship-between-the-secure-container-and-peripheral-components.png deleted file mode 100644 index 454fc025ecb88fef09472eef7cb29ca7a8164856..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/relationship-between-the-secure-container-and-peripheral-components.png and /dev/null differ diff --git a/docs/en/docs/Container/figures/secure-container.png b/docs/en/docs/Container/figures/secure-container.png deleted file mode 100644 index 2e8b48bdbd0766ec513e0654212cd16613eff826..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/figures/secure-container.png and /dev/null differ diff --git a/docs/en/docs/Container/image-management-1.md b/docs/en/docs/Container/image-management-1.md deleted file mode 100644 index 31f4cf824ada16d011168db5107e020df3e3bed9..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/image-management-1.md +++ /dev/null @@ -1,59 +0,0 @@ -# Image Management - -- [Image Management](#image-management-1) - - [Creating an Image](#creating-an-image) - - [Viewing Images](#viewing-images) - - [Deleting Images](#deleting-images) - - -## Creating an Image - -You can use the **docker pull**, **docker build**,** docker commit**, **docker import**, or **docker load** command to create an image. For details about how to use these commands, see [4.6.3 Image Management](#image-management-43.md#EN-US_TOPIC_0184808261). - -### Precautions - -1. 
Do not concurrently run the **docker load** and **docker rmi** commands. If both of the following conditions are met, concurrency problems may occur: - - - An image exists in the system. - - The docker rmi and docker load operations are concurrently performed on an image. - - Therefore, avoid this scenario. \(All concurrent operations between the image creation operations such as running the **tag**, **build**, and **load**, and **rmi** commands, may cause similar errors. Therefore, do not concurrently perform these operations with **rmi**.\) - -2. If the system is powered off when docker operates an image, the image may be damaged. In this case, you need to manually restore the image. - - When the docker operates images \(using the **pull**, **load**, **rmi**, **build**, **combine**, **commit**, or **import** commands\), image data operations are asynchronous, and image metadata is synchronous. Therefore, if the system power is off when not all image data is updated to the disk, the image data may be inconsistent with the metadata. Users can view images \(possibly none images\), but cannot start containers, or the started containers are abnormal. In this case, run the **docker rmi** command to delete the image and perform the previous operations again. The system can be recovered. - -3. Do not store a large number of images on nodes in the production environment. Delete unnecessary images in time. - - If the number of images is too large, the execution of commands such as **docker image** is slow. As a result, the execution of commands such as **docker build** or **docker commit** fails, and the memory may be stacked. In the production environment, delete unnecessary images and intermediate process images in time. - -4. When the **--no-parent** parameter is used to build images, if multiple build operations are performed at the same time and the FROM images in the Dockerfile are the same, residual images may exist. 
There are two cases: - - If FROM images are incomplete, the images generated when images of FROM are running may remain. Names of the residual images are similar to **base\_v1.0.0-app\_v2.0.0**, or they are none images. - - If the first several instructions in the Dockerfile are the same, none images may remain. - - -### None Image May Be Generated - -1. A none image is the top-level image without a tag. For example, the image ID of **ubuntu** has only one tag **ubuntu**. If the tag is not used but the image ID is still available, the image ID becomes a none image. -2. An image is protected because the image data needs to be exported during image saving. However, if a deletion operation is performed, the image may be successfully untagged and the image ID may fail to be deleted \(because the image is protected\). As a result, the image becomes a none image. -3. If the system is powered off when you run the **docker pull** command or the system is in panic, a none image may be generated. To ensure image integrity, you can run the **docker rmi** command to delete the image and then restart it. -4. If you run the **docker save** command to save an image and specify the image ID as the image name, the loaded image does not have a tag and the image name is **none**. - -### A Low Probability That Image Fails to Be Built If the Image Is Deleted When Being Built - -Currently, the image build process is protected by reference counting. After an image is built, reference counting of the image is increased by 1 \(holdon operation\). Once the holdon operation is successful, the image will not be deleted. However, there is a low probability that before the holdon operation is performed, the image can still be deleted, causing the image build failure. - -## Viewing Images - -Run the following command to view the local image list: - -``` -docker images -``` - -## Deleting Images - -### Precautions - -Do not run the **docker rmi –f **_XXX_ command to delete images. 
If you forcibly delete an image, the **docker rmi** command ignores errors during the process, which may cause residual metadata of containers or images. If you delete an image in common mode and an error occurs during the deletion process, the deletion fails and no metadata remains. - diff --git a/docs/en/docs/Container/image-management-2.md b/docs/en/docs/Container/image-management-2.md deleted file mode 100644 index 2d7fa0077fa033caef470e3291f83ad279d39091..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/image-management-2.md +++ /dev/null @@ -1,512 +0,0 @@ -# Image Management - -- [Image Management](#image-management-2) - - [build](#build) - - [history](#history) - - [images](#images) - - [import](#import) - - [load](#load) - - [login](#login) - - [logout](#logout) - - [pull](#pull) - - [push](#push) - - [rmi](#rmi) - - [save](#save) - - [search](#search) - - [tag](#tag) - - -   - - - -## build - -Syntax: **docker build \[**_options_**\]** _path_ **|** _URL_ **| -** - -Function: Builds an image using the Dockerfile in the specified path. - -Parameter description: Common parameters are as follows. For details about more parameters, see the **docker help build** command section. - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

--force-rm=false

-

Deletes containers generated during the build process even if the build is not successful.

-

--no-cache=false

-

Builds cache without using cache.

-

-q, --quiet=false

-

Prevents the redundant information generation during the build.

-

--rm=true

-

Deletes the container generated during the build after the build is successful.

-

-t, --tag=""

-

Tag name of the image generated during the build.

-

--build-arg=[]

-

Configures the build parameters.

-

--label=[]

-

Image-related parameters. The description of each parameter is similar to that of the create command.

-

--isolation

-

Container isolation method.

-

--pull

-

Obtains the latest image during the build.

-
- -**Dockerfile Command** - -Dockerfile is used to describe how to build an image and automatically build a container. The format of all **Dockerfile** commands is _instruction_ _arguments_. - -   - -**FROM Command** - -Syntax: **FROM** _image_ or **FROM** _image_:_tag_ - -Function: Specifies a basic image, which is the first command for all Dockerfile files. If the tag of a basic image is not specified, the default tag name **latest** is used. - -   - -**RUN Command** - -Syntax: **RUN** _command_ \(for example, **run in a shell - \`/bin/sh -c\`**\) or - -**RUN \[**_executable_, _param1_, _param2_ ... **\]** \(in the **exec** command format\) - -Function: Runs any command in the image specified by the **FROM** command and then commits the result. The committed image can be used in later commands. The **RUN** command is equivalent to: - -**docker run** _image_ _command_ - -**docker commit** _container\_id_ - -   - -**Remarks** - -The number sign \(\#\) is used to comment out. - -   - -**MAINTAINER Command** - -Syntax: **MAINTAINER **_name_ - -Function: Specifies the name and contact information of the maintenance personnel. - -   - -**ENTRYPOINT Command** - -Syntax: **ENTRYPOINT cmd **_param1 param2..._ or **ENTRYPOINT \[**_"cmd", "param1", "param2"..._**\]** - -Function: Configures the command to be executed during container startup. - -   - -**USER Command** - -Syntax: **USER **_name_ - -Function: Specifies the running user of memcached. - -   - -**EXPOSE Command** - -Syntax: **EXPOSE **_port_** \[**_port_**...\]** - -Function: Enables one or more ports for images. - -   - -**ENV Command** - -Syntax: **ENV**_ key value_ - -Function: Configures environment variables. After the environment variables are configured, the **RUN** commands can be subsequently used. - -   - -**ADD Command** - -Syntax: **ADD**_ src dst_ - -Function: Copies a file from the _src_ directory to the _dest_ directory of a container. 
_src_ indicates the relative path of the source directory to be built. It can be the path of a file or directory, or a remote file URL. _dest_ indicates the absolute path of the container. - -   - -**VOLUME Command** - -Syntax: **VOLUME \["**_mountpoint_**"\]** - -Function: Creates a mount point for sharing a directory. - -   - -**WORKDIR Command** - -Syntax: **workdir **_path_ - -Function: Runs the **RUN**, **CMD**, and **ENTRYPOINT** commands to set the current working path. The current working path can be set multiple times. If the current working path is a relative path, it is relative to the previous **WORKDIR** command. - -   - -**CMD command** - -Syntax: **CMD \[**_"executable","param1","param2"_**\]** \(This command is similar to the **exec** command and is preferred.\) - -**CMD \["**_param1_**","**_param2_**"\]** \(The parameters are the default parameters for ENTRYPOINT.\) - -**CMD** _command_ _param1_ _param2_ \(This command is similar to the **shell** command.\) - -Function: A Dockerfile can contain only one CMD command. If there are multiple CMD commands, only the last one takes effect. - -   - -**ONBUILD Commands** - -Syntax: **ONBUILD \[**_other commands_**\]** - -Function: This command is followed by other commands, such as the **RUN** and **COPY** commands. This command is not executed during image build and is executed only when the current image is used as the basic image to build the next-level image. - -The following is a complete example of the Dockerfile command that builds an image with the sshd service installed. - - - - - -
FROM busybox
-ENV  http_proxy http://192.168.0.226:3128
-ENV  https_proxy https://192.168.0.226:3128
-RUN apt-get update && apt-get install -y openssh-server
-RUN mkdir -p /var/run/sshd
-EXPOSE 22
-ENTRYPOINT /usr/sbin/sshd -D
-
- -Example: - -1. Run the following command to build an image using the preceding Dockerfile: - - ``` - $ sudo docker build -t busybox:latest - ``` - -2. Run the following command to view the generated image: - - ``` - docker images | grep busybox - ``` - - -## history - -Syntax: **docker history \[**_options_**\]** _image_ - -Function: Displays the change history of an image. - -Parameter description: - --H, --human=true - -**--no-trunc=false**: Does not delete any output. - -**-q** and **--quiet=false**: Display only IDs. - -Example: - -``` -$ sudo docker history busybox:test -IMAGE CREATED CREATED BY SIZE COMMENT -be4672959e8b 15 minutes ago bash 23B -21970dfada48 4 weeks ago 128MB Imported from - -``` - -   - -## images - -Syntax: **docker images \[**_options_**\] \[**_name_**\]** - -Function: Lists existing images. The intermediate image is not displayed if no parameter is configured. - -Parameter description: - -**-a** and **--all=false**: Display all images. - -**-f** and **--filter=\[\]**: Specify a filtering value, for example, **dangling=true**. - -**--no-trunc=false**: Does not delete any output. - -**-q** and **--quiet=false**: Display only IDs. - -Example: - -``` -$ sudo docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox latest e02e811dd08f 2 years ago 1.09MB -``` - -   - -## import - -Syntax: **docker import URL|- \[**_repository_**\[**_:tag_**\]\]** - -Function: Imports a .tar package that contains rootfs as an image. This parameter corresponds to the **docker export** command. - -Parameter description: none. 
- -Example: - -Run the following command to generate a new image for **busybox.tar** exported using the **docker export** command: - -``` -$ sudo docker import busybox.tar busybox:test -sha256:a79d8ae1240388fd3f6c49697733c8bac4d87283920defc51fb0fe4469e30a4f -$ sudo docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox test a79d8ae12403 2 seconds ago 1.3MB -``` - -   - -## load - -Syntax: **docker load \[**_options_**\]** - -Function: Reloads an image from .tar package obtained by running the **docker save** command. This parameter corresponds to the **docker save** command. - -Parameter description: - -**-i** and **--input=""** can be used. - -Example: - -``` -$ sudo docker load -i busybox.tar -Loaded image ID: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba -$ sudo docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox latest e02e811dd08f 2 years ago 1.09MB -``` - -## login - -Syntax: **docker login \[**_options_**\] \[**_server_**\]** - -Function: Logs in to an image server. If no server is specified, the system logs in to **https://index.docker.io/v1/** by default. - -Parameter description: - -**-e** and **--email=""**: Email address. - -**-p** and **--password=""**: Password. - -**-u** and **--username=""**: User name. - -Example: - -``` -$ sudo docker login -``` - -## logout - -Syntax: **docker logout \[**_server_**\]** - -Function: Logs out of an image server. If no server is specified, the system logs out of **https://index.docker.io/v1/** by default. - -Parameter description: none. - -Example: - -``` -$ sudo docker logout -``` - -## pull - -Syntax: **docker pull \[**_options_**\]** _name_**\[**_:tag_**\]** - -Function: Pulls an image from an official or private registry. - -Parameter description: - -**-a** and **--all-tags=false**: Download all images in a registry. \(A registry can be tagged with multiple tags. 
For example, a busybox registry may have multiple tags, such as **busybox:14.04**, **busybox:13.10**, **busybox:latest**. If **-a** is used, all busybox images with tags are pulled.\) - -Example: - -1. Run the following command to obtain the Nginx image from the official registry: - - ``` - $ sudo docker pull nginx - Using default tag: latest - latest: Pulling from official/nginx - 94ed0c431eb5: Pull complete - 9406c100a1c3: Pull complete - aa74daafd50c: Pull complete - Digest: sha256:788fa27763db6d69ad3444e8ba72f947df9e7e163bad7c1f5614f8fd27a311c3 - Status: Downloaded newer image for nginx:latest - ``` - - When an image is pulled, the system checks whether the dependent layer exists. If yes, the local layer is used. - -2. Pull an image from a private registry. - - Run the following command to pull the Fedora image from the private registry, for example, the address of the private registry is **192.168.1.110:5000**: - - ``` - $ sudo docker pull 192.168.1.110:5000/fedora - ``` - - -## push - -Syntax: **docker push** _name_**\[**_:tag_**\]** - -Function: Pushes an image to the image registry. - -Parameter description: none. - -Example: - -1. Run the following command to push an image to the private image registry at 192.168.1.110:5000. -2. Label the image to be pushed. \(The **docker tag** command is described in the following section.\) In this example, the image to be pushed is busybox:sshd. - - ``` - $ sudo docker tag ubuntu:sshd 192.168.1.110:5000/busybox:sshd - ``` - -3. Run the following command to push the tagged image to the private image registry: - - ``` - $ sudo docker push 192.168.1.110:5000/busybox:sshd - ``` - - During the push, the system automatically checks whether the dependent layer exists in the image registry. If yes, the layer is skipped. - - -## rmi - -Syntax: **docker rmi \[**_options_**\] **_image _**\[**_image..._**\]** - -Function: Deletes one or more images. 
If an image has multiple tags in the image library, only the untag operation is performed when the image is deleted. If the image has only one tag, the dependent layers are deleted in sequence. - -Parameter description: - -**-f** and **--force=false**: Forcibly delete an image. - -**--no-prune=false**: Does not delete parent images without tags. - -Example: - -``` -$ sudo docker rmi 192.168.1.110:5000/busybox:sshd -``` - -## save - -Syntax: **docker save \[**_options_**\] **_image _**\[**_image..._**\]** - -Function: Saves an image to a TAR package. The output is **STDOUT** by default. - -Parameter description: - -**-o** and **--output=""**: Save an image to a file rather than STDOUT. - -Example: - -``` -$ sudo docker save -o nginx.tar nginx:latest -$ ls -nginx.tar -``` - -## search - -Syntax: **docker search **_options_ _TERM_ - -Function: Searches for a specific image in the image registry. - -Parameter description: - -**--automated=false**: Displays the automatically built image. - -**--no-trunc=false**: Does not delete any output. - -**-s** and **--stars=0**: Display only images of a specified star level or higher. - -Example: - -1. Run the following command to search for Nginx in the official image library: - - ``` - $ sudo docker search nginx - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - nginx Official build of Nginx. 11873 [OK] - jwilder/nginx-proxy Automated Nginx reverse proxy for docker con... 1645 [OK] - richarvey/nginx-php-fpm Container running Nginx + PHP-FPM capable of... 739 [OK] - linuxserver/nginx An Nginx container, brought to you by LinuxS... 74 - bitnami/nginx Bitnami nginx Docker Image 70 [OK] - tiangolo/nginx-rtmp Docker image with Nginx using the nginx-rtmp... 51 [OK] - ``` - -    - -2. Run the following command to search for busybox in the private image library. The address of the private image library must be added during the search. 
- - ``` - $ sudo docker search 192.168.1.110:5000/busybox - ``` - - -## tag - -Syntax: **docker tag \[**_options_**\] **_image_**\[**_:tag_**\] \[**_registry host/_**\]\[**_username/_**\]**_name_**\[**_:tag_**\]** - -Function: Tags an image to a registry. - -Parameter description: - -**-f** or **--force=false**: Forcibly replaces the original image when the same tag name exists. - -Example: - -``` -$ sudo docker tag busybox:latest busybox:test -``` - diff --git a/docs/en/docs/Container/image-management.md b/docs/en/docs/Container/image-management.md deleted file mode 100644 index 415f012bbd0b035529ebe1d1302946cb29a96804..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/image-management.md +++ /dev/null @@ -1,376 +0,0 @@ -# Image Management - -- [Image Management](#image-management) - - [Docker Image Management](#docker-image-management) - - [Logging In to a Registry](#logging-in-to-a-registry) - - [Logging Out of a Registry](#logging-out-of-a-registry) - - [Pulling Images from a Registry](#pulling-images-from-a-registry) - - [Deleting Images](#deleting-images) - - [Loading Images](#loading-images) - - [Listing Images](#listing-images) - - [Inspecting Images](#inspecting-images) - - [Two-Way Authentication](#two-way-authentication) - - [Embedded Image Management](#embedded-image-management) - - [Loading Images](#loading-images-3) - - [Listing Images](#listing-images-4) - - [Inspecting Images](#inspecting-images-5) - - [Deleting Images](#deleting-images-6) - - -## Docker Image Management - - - - -### Logging In to a Registry - -#### Description - -The **isula login** command is run to log in to a registry. After successful login, you can run the **isula pull** command to pull images from the registry. If the registry does not require a password, you do not need to run this command before pulling images. 
- -#### Usage - -``` -isula login [OPTIONS] SERVER -``` - -#### Parameters - -For details about parameters in the **login** command, see [Table 1](#command-line-parameters.md#en-us_topic_0189976507_table2711184314112). - -#### Example - -``` -$ isula login -u abc my.csp-edge.com:5000 - -Login Succeeded -``` - -### Logging Out of a Registry - -#### Description - -The **isula logout** command is run to log out of a registry. If you run the **isula pull** command to pull images from the registry after logging out of the system, the image will fail to be pulled because you are not authenticated. - -#### Usage - -``` -isula logout SERVER -``` - -#### Parameters - -For details about parameters in the **logout** command, see [Table 2](#command-line-parameters.md#en-us_topic_0189976507_table184058282137). - -#### Example - -``` -$ isula logout my.csp-edge.com:5000 -Logout Succeeded -``` - -### Pulling Images from a Registry - -#### Description - -Pull images from a registry to the local host. - -#### Usage - -``` -isula pull [OPTIONS] NAME[:TAG|@DIGEST] -``` - -#### Parameters - -For details about parameters in the **pull** command, see [Table 3](#command-line-parameters.md#en-us_topic_0189976507_table157501230181515). - -#### Example - -``` -$ isula pull localhost:5000/official/busybox -Image "localhost:5000/official/busybox" pulling -Image "localhost:5000/official/busybox@sha256:bf510723d2cd2d4e3f5ce7e93bf1e52c8fd76831995ac3bd3f90ecc866643aff" pulled -``` - -### Deleting Images - -#### Description - -Delete one or more images. - -#### Usage - -``` -isula rmi [OPTIONS] IMAGE [IMAGE...] -``` - -#### Parameters - -For details about parameters in the **rmi** command, see [Table 4](#command-line-parameters.md#en-us_topic_0189976507_table856181871617). - -#### Example - -``` -$ isula rmi rnd-dockerhub.huawei.com/official/busybox -Image "rnd-dockerhub.huawei.com/official/busybox" removed -``` - -### Loading Images - -#### Description - -Load images from a .tar package. 
The .tar package must be exported by using the **docker save** command or must be in the same format. - -#### Usage - -``` -isula load [OPTIONS] -``` - -#### Parameters - -For details about parameters in the **load** command, see [Table 5](#command-line-parameters.md#en-us_topic_0189976507_table99761512187). - -#### Example - -``` -$ isula load -i busybox.tar -Load image from "/root/busybox.tar" success -``` - -### Listing Images - -#### Description - -List all images in the current environment. - -#### Usage - -``` -isula images -``` - -#### Parameters - -For details about parameters in the **images** command, see [Table 6](#command-line-parameters.md#en-us_topic_0189976507_table1698717275206). - -#### Example - -``` -$ isula images -REF IMAGE ID CREATED SIZE -rnd-dockerhub.huawei.com/official/busybox:latest e4db68de4ff2 2019-06-15 08:19:54 1.376 MB -``` - -### Inspecting Images - -#### Description - -After the configuration information of an image is returned, you can use the **-f** parameter to filter the information as needed. - -#### Usage - -``` -isula inspect [options] CONTAINER|IMAGE [CONTAINER|IMAGE...] -``` - -#### Parameters - -For details about parameters in the **inspect** command, see [Table 7](#command-line-parameters.md#en-us_topic_0189976507_table73237211516). - -#### Example - -``` -$ isula inspect -f "{{json .image.id}}" rnd-dockerhub.huawei.com/official/busybox -"e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b" -``` - -### Two-Way Authentication - -#### Description - -After this function is enabled, iSulad and image repositories communicate over HTTPS. Both iSulad and image repositories verify the validity of each other. - -#### Usage - -The corresponding registry needs to support this function and iSulad needs to be configured as follows: - -1. Modify iSulad configuration \(default path: **/etc/isulad/daemon.json**\) and set **use-decrypted-key** to **false**. -2. 
Place related certificates in the folder named after the registry in the **/etc/isulad/certs.d** directory. For details about how to generate certificates, visit the official Docker website: - - [https://docs.docker.com/engine/security/certificates/](https://docs.docker.com/engine/security/certificates/) - - [https://docs.docker.com/engine/security/https/](https://docs.docker.com/engine/security/https/) - - -1. Run the **systemctl restart isulad** command to restart iSulad. - -#### Parameters - -Parameters can be configured in the **/etc/isulad/daemon.json** file or carried when iSulad is started. - -``` -isulad --use-decrypted-key=false -``` - -#### Example - -Set **use-decrypted-key** to **false**. - -``` -$ cat /etc/isulad/daemon.json -{ - "group": "isulad", - "graph": "/var/lib/isulad", - "state": "/var/run/isulad", - "engine": "lcr", - "log-level": "ERROR", - "pidfile": "/var/run/isulad.pid", - "log-opts": { - "log-file-mode": "0600", - "log-path": "/var/lib/isulad", - "max-file": "1", - "max-size": "30KB" - }, - "log-driver": "stdout", - "hook-spec": "/etc/default/isulad/hooks/default.json", - "start-timeout": "2m", - "storage-driver": "overlay2", - "storage-opts": [ - "overlay2.override_kernel_check=true" - ], - "registry-mirrors": [ - "docker.io" - ], - "insecure-registries": [ - "rnd-dockerhub.huawei.com" - ], - "pod-sandbox-image": "", - "image-opt-timeout": "5m", - "native.umask": "secure", - "network-plugin": "", - "cni-bin-dir": "", - "cni-conf-dir": "", - "image-layer-check": false, - "use-decrypted-key": false, - "insecure-skip-verify-enforce": false -} -``` - -Place the certificate in the corresponding directory. - -``` -$ pwd -/etc/isulad/certs.d/my.csp-edge.com:5000 -$ ls -ca.crt tls.cert tls.key -``` - -Restart iSulad. 
- -``` -$ systemctl restart isulad -``` - -Run the **pull** command to download images from the registry: - -``` -$ isula pull my.csp-edge.com:5000/busybox -Image "my.csp-edge.com:5000/busybox" pulling -Image "my.csp-edge.com:5000/busybox@sha256:f1bdc62115dbfe8f54e52e19795ee34b4473babdeb9bc4f83045d85c7b2ad5c0" pulled -``` - -## Embedded Image Management - -### Loading Images - -#### Description - -Load images based on the **manifest** files of embedded images. The value of **--type** must be set to **embedded**. - -#### Usage - -``` -isula load [OPTIONS] --input=FILE --type=TYPE -``` - -#### Parameters - -For details about parameters in the **load** command, see [Table 5](#command-line-parameters.md#en-us_topic_0189976507_table99761512187). - -#### Example - -``` -$ isula load -i test.manifest --type embedded -Load image from "/root/work/bugfix/tmp/ci_testcase_data/embedded/img/test.manifest" success -``` - -### Listing Images - -#### Description - -List all images in the current environment. - -#### Usage - -``` -isula images [OPTIONS] -``` - -#### Parameters - -For details about parameters in the **images** command, see [Table 6](#command-line-parameters.md#en-us_topic_0189976507_table1698717275206). - -#### Example - -``` -$ isula images -REF IMAGE ID CREATED SIZE -test:v1 9319da1f5233 2018-03-01 10:55:44 1.273 MB -``` - -### Inspecting Images - -#### Description - -After the configuration information of an image is returned, you can use the **-f** parameter to filter the information as needed. - -#### Usage - -``` -isula inspect [options] CONTAINER|IMAGE [CONTAINER|IMAGE...] -``` - -#### Parameters - -For details about parameters in the **inspect** command, see [Table 7](#command-line-parameters.md#en-us_topic_0189976507_table73237211516). - -#### Example - -``` -$ isula inspect -f "{{json .created}}" test:v1 -"2018-03-01T15:55:44.322987811Z" -``` - -### Deleting Images - -#### Description - -Delete one or more images. 
- -#### Usage - -``` -isula rmi [OPTIONS] IMAGE [IMAGE...] -``` - -#### Parameters - -For details about parameters in the **rmi** command, see [Table 4](#command-line-parameters.md#en-us_topic_0189976507_table856181871617). - -#### Example - -``` -$ isula rmi test:v1 -Image "test:v1" removed -``` - diff --git a/docs/en/docs/Container/installation-and-deployment-1.md b/docs/en/docs/Container/installation-and-deployment-1.md deleted file mode 100644 index fc7f9f9add7f8efce8ea207abc9b81a383bd427b..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/installation-and-deployment-1.md +++ /dev/null @@ -1,126 +0,0 @@ -# Installation and Deployment - -- [Installation and Deployment](#installation-and-deployment-1) - - [Installation Methods](#installation-methods-26) - - [Deployment Configuration](#deployment-configuration-27) - - [Configuring the Docker Engine](#configuring-the-docker-engine) - - [iSulad Configuration](#isulad-configuration) - - [Configuration.toml](#configuration-toml) - - -## Installation Methods - -### Prerequisites - -- For better performance experience, a secure container needs to run on the bare metal server and must not run on VMs. -- A secure container depends on the following components \(openEuler 1.0 version\). Ensure that the required components have been installed in the environment. To install iSulad, refer to [Installation Methods](#installation-methods.md). - - docker-engine - - qemu - - -### Installation Procedure - -Released secure container components are integrated in the **kata-containers-**_version_**.rpm** package. You can run the **rpm** command to install the corresponding software. - -``` -rpm -ivh kata-containers-.rpm -``` - -## Deployment Configuration - -### Configuring the Docker Engine - -To enable the Docker engine to support kata-runtime, perform the following steps to configure the Docker engine: - -1. 
Ensure that all software packages \(**docker-engine** and **kata-containers**\) have been installed in the environment. -2. Stop the Docker engine. - - ``` - systemctl stop docker - ``` - -3. Modify the configuration file **/etc/docker/daemon.json** of the Docker engine and add the following configuration: - - ``` - { - "runtimes": { - "kata-runtime": { - "path": "/usr/bin/kata-runtime", - "runtimeArgs": [ - "--kata-config", - "/usr/share/defaults/kata-containers/configuration.toml" - ] - } - } - } - ``` - -4. Restart the Docker engine. - - ``` - systemctl start docker - ``` - - -### iSulad Configuration - -To enable the iSulad to support the new container runtime kata-runtime, perform the following steps which are similar to those for the container engine docker-engine: - -1. Ensure that all software packages \(iSulad and kata-containers\) have been installed in the environment. -2. Stop iSulad. - - ``` - systemctl stop isulad - ``` - -3. Modify the **/etc/isulad/daemon.json** configuration file of the iSulad and add the following configurations: - - ``` - { - "runtimes": { - "kata-runtime": { - "path": "/usr/bin/kata-runtime", - "runtime-args": [ - "--kata-config", - "/usr/share/defaults/kata-containers/configuration.toml" - ] - } - } - } - ``` - -4. Restart iSulad. - - ``` - systemctl start isulad - ``` - - -### Configuration.toml - -The secure container provides a global configuration file configuration.toml. Users can also customize the path and configuration options of the secure container configuration file. - -In the **runtimeArges** field of Docker engine, you can use **--kata-config** to specify a private file. The default configuration file path is **/usr/share/defaults/kata-containers/configuration.toml**. - -The following lists the common fields in the configuration file. For details about the configuration file options, see [configuration.toml](#configuration-toml-31.md). - -1. 
hypervisor.qemu - - **path**: specifies the execution path of the virtualization QEMU. - - **kernel**: specifies the execution path of the guest kernel. - - **initrd**: specifies the guest initrd execution path. - - **machine\_type**: specifies the type of the analog chip. The value is **virt** for the ARM architecture and **pc** for the x86 architecture. - - **kernel\_params**: specifies the running parameters of the guest kernel. - -2. proxy.kata - - **path**: specifies the kata-proxy running path. - - **enable\_debug**: enables the debugging function for the kata-proxy process. - -3. agent.kata - - **enable\_blk\_mount**: enables guest mounting of the block device. - - **enable\_debug**: enables the debugging function for the kata-agent process. - -4. runtime - - **enable\_cpu\_memory\_hotplug**: enables CPU and memory hot swap. - - **enable\_debug**: enables debugging for the kata-runtime process. - - diff --git a/docs/en/docs/Container/installation-and-deployment-2.md b/docs/en/docs/Container/installation-and-deployment-2.md deleted file mode 100644 index e001954b7c15c4d9daa8d8ab930826bf67c20bcf..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/installation-and-deployment-2.md +++ /dev/null @@ -1,454 +0,0 @@ -# Installation and Deployment - -- [Installation and Deployment](#installation-and-deployment-2) - - [Precautions](#precautions) - - [Basic Installation Configuration](#basic-installation-configuration) - - [Daemon Parameter Configuration](#daemon-parameter-configuration) - - [Daemon Running Directory Configuration](#daemon-running-directory-configuration) - - [Daemon Network Configuration](#daemon-network-configuration) - - [Daemon umask Configuration](#daemon-umask-configuration) - - [Daemon Start Time](#daemon-start-time) - - [Journald Component](#journald-component) - - [Firewalld Component](#firewalld-component) - - [Iptables Component](#iptables-component) - - [Audit Component](#audit-component) - - [Security Configuration 
seccomp](#security-configuration-seccomp) - - [Do Not Modify Private Directory of Docker Daemon](#do-not-modify-private-directory-of-docker-daemon) - - [Precautions for Common Users in the Scenario Where a Large Number of Containers Are Deployed](#precautions-for-common-users-in-the-scenario-where-a-large-number-of-containers-are-deployed) - - [Storage Driver Configuration](#storage-driver-configuration) - - [overlay2 Storage Driver Configuration](#overlay2-storage-driver-configuration) - - [devicemapper Storage Driver Configuration](#devicemapper-storage-driver-configuration-35) - - [Impact of Forcibly Killing Docker Background Processes](#impact-of-forcibly-killing-docker-background-processes) - - [Semaphores May Be Residual](#semaphores-may-be-residual) - - [NICs May Be Residual](#nics-may-be-residual) - - [Failed to Restart a Container](#failed-to-restart-a-container) - - [Failed to Restart the Docker Service](#failed-to-restart-the-docker-service) - - [Impact of System Power-off](#impact-of-system-power-off) - - - -## Precautions - -- The **docker-engine** RPM package cannot be installed together with the **containerd**, **runc**, or **podman** RPM package. This is because the **docker-engine** RPM package contains all components required for Docker running, including **containerd**, **runc**, and **docker** binary files. Yet the **containerd**, **runc**, and **podman** RPM packages also contain the corresponding binary files. Software package conflicts may occur due to repeated installation. - -## Basic Installation Configuration - - - -### Daemon Parameter Configuration - -You can add configuration items to the **/etc/docker/daemon.json** file to customize parameters. You can run the **dockerd --help** command to view related configuration items and their usage methods. 
A configuration example is as follows: - -``` -cat /etc/docker/daemon.json -{ - "debug": true, - "storage-driver": "overlay2", - "storage-opts": ["overlay2.override_kernel_check=true"] -} -``` - -### Daemon Running Directory Configuration - -Re-configuring various running directories and files \(including **--graph** and **--exec-root**\) may cause directory conflicts or file attribute changes, affecting the normal use of applications. - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->Therefore, the specified directories or files should be used only by Docker to avoid file attribute changes and security issues caused by conflicts. - -- Take **--graph** as an example. When **/new/path/** is used as the new root directory of the daemon, if a file exists in **/new/path/** and the directory or file name conflicts with that required by Docker \(for example, **containers**, **hooks**, and **tmp**\), Docker may update the original directory or file attributes, including the owner and permission. - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->From Docker 17.05, the **--graph** parameter is marked as **Deprecated** and replaced with the **--data-root** parameter. - -### Daemon Network Configuration - -- After the network segment of the docker0 bridge is specified by using the **--bip** parameter on Docker daemon, if the **--bip** parameter is deleted during the next Docker daemon restart, the docker0 bridge uses the previous value of **--bip**, even if the docker0 bridge is deleted before the restart. The reason is that Docker saves the network configuration and restores the previous configuration by default during the next restart. -- When running the **docker network create** command to concurrently create networks, you can create two networks with the same name. The reason is that Docker networks are distinguished by IDs. The name is only an alias that is easy to identify and may not be unique. 
-- In the Docker bridge network mode, a Docker container establishes external communication through NAT on the host. When Docker daemon starts a Docker container, a docker-proxy process is started for each port mapped on the host to access the proxy. It is recommended that you map only the necessary ports when using userland-proxy to reduce the resources consumed by the port mapping of docker-proxy. - -### Daemon umask Configuration - -The default **umask** value of the main container process and exec process is **0022**. To meet security specifications and prevent containers from being attacked, the default value of **umask** is changed to **0027** after runC implementation is modified. After the modification, the other groups cannot access new files or directories. - -The default value of **umask** is **0027** when Docker starts a container. You can change the value to **0022** by running the **--exec-opt native.umask=normal** command during container startup. - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->If **native.umask** is configured in **docker create** or **docker run** command, its value is used. - -For details, see the parameter description in [4.6.2.4 create](#create.md#EN-US_TOPIC_0184808242) and [4.6.2.16 run](#container-management-40.md#EN-US_TOPIC_0184808238). - -### Daemon Start Time - -The Docker service is managed by systemd, which restricts the startup time of each service. If the Docker service fails to be started within the specified time, the possible causes are as follows: - -- If Docker daemon is started for the first time using devicemapper, the Docker daemon needs to perform the initialization operation on the device. This operation, however, will perform a large number of disk I/O operations. When the disk performance is poor or many I/O conflicts exist, the Docker daemon startup may time out. devicemapper needs to be initialized only once and does not need to be initialized again during later Docker daemon startup. 
-- If the usage of the current system resources is too high, the system responses slowly, all operations in the system slow down, and the startup of the Docker service may time out. -- During the restart, a daemon traverses and reads configuration files and the init layer and writable layer configurations of each container in the Docker working directory. If there are too many containers \(including the created and exited containers\) in the current system and the disk read and write performance is limited, the startup of the Docker service may time out due to the long-time daemon traversing. - -   - -If the service startup times out, you are advised to rectify the fault as follows: - -- Ensure that the container orchestration layer periodically deletes unnecessary containers, especially the exited containers. -- Based on performance requirements of the solution, adjust the cleanup period of the orchestration layer and the start time of the Docker service. - -### Journald Component - -After systemd-journald is restarted, Docker daemon needs to be restarted. Journald obtains the Docker daemon logs through a pipe. If the journald service is restarted, the pipe is disabled. The write operation of Docker logs triggers the SIGPIPE signal, which causes the Docker daemon crash. If this signal is ignored, the subsequent Docker daemon logs may fail to be recorded. Therefore, you are advised to restart Docker daemon after the journald service is restarted or becomes abnormal, ensuring that Docker logs can be properly recorded and preventing status exceptions caused by daemon crash. - -### Firewalld Component - -You need to restart the Docker service after restarting or starting firewalld. - -- When the firewalld service is started, the iptables rules of the current system are cleared. Therefore, if the firewalld service is restarted during Docker daemon startup, the Docker service may fail to insert iptables rules, causing the Docker service startup failure. 
-- If the firewalld service is restarted after the Docker service is started, or the status of the firewalld service \(service paused or resumed\) is changed, the iptables rules of the Docker service are deleted. As a result, the container with port mapping fails to be created. - -### Iptables Component - -If the **--icc=false** option is added in Docker, the communication between containers can be restricted. However, if the OS has some rules, the communication between containers may not be restricted. For example: - -``` -Chain FORWARD (policy ACCEPT 0 packets, 0 bytes) -... -0 0 ACCEPT icmp -- * * 0.0.0.0/0 0.0.0.0/0 -... -0 0 DROP all -- docker0 docker0 0.0.0.0/0 0.0.0.0/0 -... -``` - -In the **Chain FORWARD** command, the ACCEPT icmp rule is added to DROP. As a result, after the **--icc=false** option is added, containers can be pinged, but the peer end is unreachable if UDP or TCP is used. - -Therefore, if you want to add the **--icc=false** option when using Docker in a container OS, you are advised to clear iptables rules on the host first. - -### Audit Component - -You can configure audit for Docker. However, this configuration is not mandatory. For example: - -``` --w /var/lib/docker -k docker --w /etc/docker -k docker --w /usr/lib/systemd/system/docker.service -k docker --w /usr/lib/systemd/system/docker.socket -k docker --w /etc/sysconfig/docker -k docker --w /usr/bin/docker-containerd -k docker --w /usr/bin/docker-runc -k docker --w /etc/docker/daemon.json -k docker -``` - -Configuring audit for Docker brings certain benefits for auditing, while it does not have any substantial effects on attack defense. In addition, the audit configurations cause serious efficiency problems, for example, the system may not respond smoothly. Therefore, exercise caution in the production environment. - -The following uses **-w /var/lib/docker -k docker** as an example to describe how to configure Docker audit. 
- -``` -[root@localhost signal]# cat /etc/audit/rules.d/audit.rules | grep docker -w /var/lib/docker/ -k docker -[root@localhost signal]# auditctl -R /etc/audit/rules.d/audit.rules | grep docker -[root@localhost signal]# auditctl -l | grep docker -w /var/lib/docker/ -p rwxa -k docker -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->**-p \[r|w|x|a\]** and **-w** are used together to monitor the read, write, execution, and attribute changes \(such as timestamp changes\) of the directory. In this case, any file or directory operation in the **/var/lib/docker** directory will be recorded in the **audit.log** file. As a result, too many logs will be recorded in the **audit.log** file, which severely affects the memory or CPU usage of the auditd, and further affects the OS. For example, logs similar to the following will be recorded in the **/var/log/audit/audit.log** file each time the **ls /var/lib/docker/containers** command is executed: - -``` -type=SYSCALL msg=audit(1517656451.457:8097): arch=c000003e syscall=257 success=yes exit=3 a0=ffffffffffffff9c a1=1b955b0 a2=90800 a3=0 items=1 ppid=17821 pid=1925 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts6 ses=4 comm="ls" exe="/usr/bin/ls" subj=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 key="docker"type=CWD msg=audit(1517656451.457:8097): cwd="/root"type=PATH msg=audit(1517656451.457:8097): item=0 name="/var/lib/docker/containers" inode=1049112 dev=fd:00 mode=040700 ouid=0 ogid=0 rdev=00:00 obj=unconfined_u:object_r:container_var_lib_t:s0 objtype=NORMAL -``` - -   - -### Security Configuration seccomp - -During the container network performance test, it is found that the performance of Docker is lower than that of the native kernel namespace. After seccomp is enabled, system calls \(such as sendto\) are not performed through system\_call\_fastpath. Instead, tracesys is called, which greatly deteriorates the performance. 
Therefore, you are advised to disable seccomp in container scenarios where services require high performance. For example: - -``` -docker run -itd --security-opt seccomp=unconfined busybox:latest -``` - -### Do Not Modify Private Directory of Docker Daemon - -Do not modify the root directory used by Docker \(**/var/lib/docker** by default\), the directory during operation \(**/run/docker** by default\), or the files or directories in the two directories. The forbidden operations include deleting files, adding files, creating soft or hard links for the directories or files, or modifying attributes, permissions, or contents of the files. If any modification is required, contact the Euler container team for review. - -### Precautions for Common Users in the Scenario Where a Large Number of Containers Are Deployed - -The maximum number of processes that a common user can create on an OS host can be restricted by creating the **/etc/security/limits.d/20-nproc.conf** file in the system. Similarly, the maximum number of processes that a common user can create in a container is determined by the value in the **/etc/security/limits.d/20-nproc.conf** file in the container image, as shown in the following example: - -``` -cat /etc/security/limits.conf -* soft nproc 4096 -``` - -If an error is reported due to insufficient resources when a large number of containers are deployed by a common user, increase the value **4096** in the **/etc/security/limits.d/20-nproc.conf** file. - -Configure the maximum value based on the maximum capability of the kernel, as shown in the following example: - -``` -[root@localhost ~]# sysctl -a | grep pid_max -kernel.pid_max = 32768 -``` - -## Storage Driver Configuration - -This Docker version supports two storage drivers: overlay2 and devicemapper. Since overlay2 has better performance than devicemapper, it is recommended that overlay2 be preferentially used in the production environment. 
- - - -### overlay2 Storage Driver Configuration - -#### Configuration Methods - -overlay2 is the default storage driver of Docker. You can also use either of the following methods to check or configure the driver: - -- Edit the **/etc/docker/daemon.json** file to check or configure the **storage-driver** field. - - ``` - cat /etc/docker/daemon.json - { - "storage-driver": "overlay2" - } - ``` - - -- Edit the **/etc/sysconfig/docker-storage** file and check or configure the Docker daemon startup parameters. - - ``` - cat /etc/sysconfig/docker-storage - DOCKER_STORAGE_OPTIONS="--storage-driver=overlay2" - ``` - - -#### Precautions - -- When you perform lifecycle management operations on some containers, an error may be reported, indicating that the corresponding rootfs or executable file cannot be found. -- If the health check of a container is configured to execute executable files in the container, an error may be reported, which causes the health check failure of the container. - -- When you use overlay2 as the graphdriver and modify an image file in a container for the first time, the modification fails if the file size is greater than the remaining space of the system. Even if a little modification on the file is involved, the whole file must be copied to the upper layer. If the remaining space is insufficient, the modification fails. -- Compared with common file systems, the overlay2 file system has the following behavior differences: - - Kernel version - - overlay2 is compatible only with the native kernel 4.0 or later. You are advised to use the Ext4 file system. - - - Copy-UP performance - - Modifying files at the lower layer triggers file replication to the upper layer. Data block replication and fsync are time-consuming. - - - Rename directories - - The rename system call is allowed only when both the source and the destination paths are at the merged layer. Otherwise, the EXDEV error is reported. 
- - Kernel 4.10 introduces the redirect directory feature to fix this issue. The corresponding kernel option is **CONFIG\_OVERLAY\_FS\_REDIRECT\_DIR**. - - When overlay2 is used, a file system directory fails to be renamed because the related feature configured in the **/sys/module/overlay/parameters/redirect\_dir** file has been disabled. To use this feature, you need to manually set **/sys/module/overlay/parameters/redirect\_dir** to **Y**. - - - Hard link disconnection - - If there are multiple hard links in the lower-layer directory, writing data to the merged layer will trigger Copy-UP, resulting in hard link disconnection. - - The index feature is introduced in kernel 4.13 to fix this issue. The corresponding kernel option is **CONFIG\_OVERLAY\_FS\_INDEX**. Note that this option is not forward compatible and does not support hot upgrade. - - - Changes of **st\_dev** and **st\_ino** - - After Copy-UP is triggered, you can view only new files at the merged layer, and inodes change. Although **attr** and **xattr** can be replicated, **st\_dev** and **st\_ino** are unique and cannot be replicated. As a result, you can run **stat** and **ls** commands to check inode changes accordingly. - - - fd change - - Before Copy-UP is triggered, you can obtain the descriptor fd1 when opening a file in read-only mode. After Copy-UP is trigger, you can obtain the descriptor fd2 when opening the file with the same name. The two descriptors point to different files. The data written to fd2 is not displayed in fd1. - - - -#### Abnormal Scenarios - -When a container uses the overlay2 storage driver, mount points may be overwritten. - -   - -#### Abnormal Scenario: Mount Point Being Overwritten - -In the faulty container, there is a mount point in **/var/lib/docker/overlay2**. 
- -``` -[root@localhost ~]# mount -l | grep overlay -overlay on /var/lib/docker/overlay2/844fd3bca8e616572935808061f009d106a8748dfd29a0a4025645457fa21785/merged type overlay (rw,relatime,seclabel,lowerdir=/var/lib/docker/overlay2/l/JL5PZQLNDCIBU3ZOG3LPPDBHIJ:/var/lib/docker/overlay2/l/ELRPYU4JJG4FDPRLZJCZZE4UO6,upperdir=/var/lib/docker/overlay2/844fd3bca8e616572935808061f009d106a8748dfd29a0a4025645457fa21785/diff,workdir=/var/lib/docker/overlay2/844fd3bca8e616572935808061f009d106a8748dfd29a0a4025645457fa21785/work) -/dev/mapper/dm-root on /var/lib/docker/overlay2 type ext4 (rw,relatime,seclabel,data=ordered) -``` - -An error as follows may occur when some Docker commands are executed: - -``` -[root@localhost ~]# docker rm 1348136d32 -docker rm: Error response from daemon: driver "overlay2" failed to remove root filesystem for 1348136d32: error while removing /var/lib/docker/overlay2/844fd3bca8e616572935808061f009d106a8748dfd29a0a4025645457fa21785: invalid argument -``` - -You will find that the rootfs of the corresponding container cannot be found on the host. However, this does not mean that the rootfs is lost. The rootfs is overwritten by the mount point in **/var/lib/docker/overlay2**, and services are still running properly. The solutions are as follows: - -- Solution 1 - 1. Run the following command to check the graphdriver used by Docker: - - ``` - docker info | grep "Storage Driver" - ``` - -    - - 2. Run the following commands to query the current mount point: - - ``` - Devicemapper: mount -l | grep devicemapper - Overlay2: mount -l | grep overlay2 - ``` - - The output format is _A_ on _B_ type _C_ \(_D_\). - - - _A_: block device name or **overlay** - - _B_: mount point - - _C_: file system type - - _D_: mounting attribute - - 3. Run the **umount** command on the mount points \(_B_\) one by one from bottom to top. - 4. Run the **docker restart** command on all the containers or delete all the containers. - 5. 
Run the following command to restart Docker: - - ``` - systemctl restart docker - ``` - - - -- Solution 2 - 1. Migrate services. - 2. Restart nodes. - - -### devicemapper Storage Driver Configuration - -If you need to set the storage driver of Docker to devicemapper, you can also use either of the following methods to check or configure the driver: - -- Edit the **/etc/docker/daemon.json** file to check or configure the **storage-driver** field. - - ``` - cat /etc/docker/daemon.json - { - "storage-driver": "devicemapper" - } - ``` - - -- Edit the **/etc/sysconfig/docker-storage** file and check or configure the Docker daemon startup parameters. - - ``` - cat /etc/sysconfig/docker-storage - DOCKER_STORAGE_OPTIONS="--storage-driver=devicemapper" - ``` - - -#### Precautions - -- To use devicemapper, you must use the direct-lvm mode. For details about the configuration method, refer to [https://docs.docker.com/engine/userguide/storagedriver/device-mapper-driver/\#configure-direct-lvm-mode-for-production](https://docs.docker.com/engine/userguide/storagedriver/device-mapper-driver/#configure-direct-lvm-mode-for-production). -- When configuring devicemapper, if the system does not have sufficient space for automatic capacity expansion of thinpool, disable the automatic capacity expansion function. -- Do not set both the following two parameters in the **/etc/lvm/profile/docker-thinpool.profile** file to **100**: - - ``` - activation { - thin_pool_autoextend_threshold=80 - thin_pool_autoextend_percent=20 - } - ``` - -- You are advised to add **--storage-opt dm.use\_deferred\_deletion=true** and **--storage-opt dm.use\_deferred\_removal=true** when using devicemapper. -- When devicemapper is used, you are advised to use Ext4 as the container file system. You need to add **--storage-opt dm.fs=ext4** to the configuration parameters of Docker daemon. 
-- If graphdriver is devicemapper and the metadata files are damaged and cannot be restored, you need to manually restore the metadata files. Do not directly operate or tamper with metadata of the devicemapper storage driver in Docker daemon. -- When the devicemapper LVM is used, if the devicemapper thinpool is damaged due to abnormal power-off, you cannot ensure the data integrity or whether the damaged thinpool can be restored. Therefore, you need to rebuild the thinpool. - -**Precautions for Switching the devicemapper Storage Pool When the User Namespace Feature Is Enabled on Docker Daemon** - -- Generally, the path of the deviceset-metadata file is **/var/lib/docker/devicemapper/metadata/deviceset-metadata** during container startup. -- If user namespaces are used, the path of the deviceset-metadata file is **/var/lib/docker/**_userNSUID.GID_**/devicemapper/metadata/deviceset-metadata**. -- When you use the devicemapper storage driver and the container is switched between the user namespace scenario and common scenario, the **BaseDeviceUUID** content in the corresponding deviceset-metadata file needs to be cleared. In the thinpool capacity expansion or rebuild scenario, you also need to clear the **BaseDeviceUUID** content in the deviceset-metadata file. Otherwise, the Docker service fails to be restarted. - -## Impact of Forcibly Killing Docker Background Processes - -The call chain of Docker is long. Forcibly killing docker background processes \(such as sending **kill -9**\) may cause data status inconsistency. This section describes some problems that may be caused by forcible killing. - -### Semaphores May Be Residual - -When the devicemapper is used as the graphdriver, forcible killing may cause residual semaphores. Docker creates semaphores when performing operations on devicemapper. If daemon is forcibly killed before the semaphores are released, the release may fail. 
A maximum of one semaphore can be leaked at a time, and the leakage probability is low. However, the Linux OS has an upper limit on semaphores. When the number of semaphore leakage times reaches the upper limit, new semaphores cannot be created. As a result, Docker daemon fails to be started. The troubleshooting method is as follows: - -1. Check the residual semaphores in the system. - - ``` - $ ipcs - ------ Message Queues -------- - key msqid owner perms used-bytes messages - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - ------ Semaphore Arrays -------- - key semid owner perms nsems - 0x0d4d3358 238977024 root 600 1 - 0x0d4d0ec9 270172161 root 600 1 - 0x0d4dc02e 281640962 root 600 1 - ``` - -2. Run the **dmsetup** command to check semaphores created by devicemapper. The semaphore set is the subset of the system semaphores queried in the previous step. - - ``` - $ dmsetup udevcookies - ``` - -3. Check the upper limit of kernel semaphores. The fourth value is the upper limit of the current system semaphores. - - ``` - $ cat /proc/sys/kernel/sem - 250 32000 32 128 - ``` - - If the number of residual semaphores in step 1 is the same as the upper limit of semaphores in step 3, the number of residual semaphores reaches the upper limit. In this case, Docker daemon cannot be normally started. You can run the following command to increase the upper limit to restart Docker: - - ``` - $ echo 250 32000 32 1024 > /proc/sys/kernel/sem - ``` - - You can also run the following command to manually clear the residual devicemapper semaphores. The following describes how to clear the devicemapper semaphores applied one minute ago. - - ``` - $ dmsetup udevcomplete_all 1 - This operation will destroy all semaphores older than 1 minutes with keys that have a prefix 3405 (0xd4d). - Do you really want to continue? [y/n]: y - 0 semaphores with keys prefixed by 3405 (0xd4d) destroyed. 0 skipped. 
- ``` - - -### NICs May Be Residual - -When a container is started in bridge mode, forcibly killing may cause residual NICs. In bridge network mode, when Docker creates a container, a pair of veths are created on the host, and then the NIC information is saved to the database. If daemon is forcibly killed before the NIC information is saved to the database of Docker, the NIC cannot be associated with Docker and cannot be deleted during the next startup because Docker deletes unused NICs from its database. - -### Failed to Restart a Container - -If container hook takes a long time, and containerd is forcibly killed during container startup, the container start operation may fail. When containerd is forcibly killed during container startup, an error is returned for the Docker start operation. After containerd is restarted, the last startup may still be in the **runc create** execution phase \(executing the user-defined hook may take a long time\). If you run the **docker start** command again to start the container, the following error message may be displayed: - -``` -Error response from daemon: oci runtime error: container with id exists: xxxxxx -``` - -This error is caused by running **runc create** on an existing container \(or being created\). After the **runc create** operation corresponding to the first start operation is complete, the **docker start** command can be successfully executed. - -The execution of hook is not controlled by Docker. In this case, if the container is recycled, the containerd process may be suspended when an unknown hook program is executed. In addition, the risk is controllable \(although the creation of the current container is affected in a short period\). - -- After the first operation is complete, the container can be successfully started again. -- Generally, a new container is created after the container fails to be started. The container that fails to be started cannot be reused. 
- -In conclusion, this problem has a constraint on scenarios. - -### Failed to Restart the Docker Service - -The Docker service cannot be restarted properly due to frequent startup in a short period. The Docker system service is monitored by systemd. If the Docker service is restarted for more than five times within 10s, the systemd service detects the abnormal startup. Therefore, the Docker service is disabled. Docker can respond to the restart command and be normally restarted only when the next period of 10s starts. - -## Impact of System Power-off - -When a system is unexpectedly powered off or system panic occurs, Docker daemon status may not be updated to the disk in time. As a result, Docker daemon is abnormal after the system is restarted. The possible problems include but are not limited to the following: - -- A container is created before the power-off. After the restart, the container is not displayed when the **docker ps –a** command is run, as the file status of the container is not updated to the disk. As a result, daemon cannot obtain the container status after the restart. -- Before the system power-off, a file is being written. After daemon is restarted, the file format is incorrect or the file content is incomplete. As a result, loading fails. -- As Docker database \(DB\) will be damaged during power-off, all DB files in **data-root** will be deleted during node restart. Therefore, the following information created before the restart will be deleted after the restart: - - Network: Resources created through Docker network will be deleted after the node is restarted. - - Volume: Resources created through Docker volume will be deleted after the node is restarted. - - Cache construction: The cache construction information will be deleted after the node is restarted. - - Metadata stored in containerd: Metadata stored in containerd will be recreated when a container is started. 
Therefore, the metadata stored in containerd will be deleted when the node is restarted. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >If you want to manually clear data and restore the environment, you can set the environment variable **DISABLE\_CRASH\_FILES\_DELETE** to **true** to disable the function of clearing DB files when the daemon process is restarted due to power-off. - - - diff --git a/docs/en/docs/Container/installation-configuration.md b/docs/en/docs/Container/installation-configuration.md deleted file mode 100644 index 50f0d97008c28b7b1441fae52af44bc0ff695998..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/installation-configuration.md +++ /dev/null @@ -1,977 +0,0 @@ -# Installation and Configuration - - -- [Installation and Configuration](./installation-configuration) - - [Installation Methods](#installation-methods) - - [Deployment Configuration](#deployment-configuration) - - [Configuration Mode](#configuration-mode) - - [Storage Description](#storage-description) - - [Constraints](#constraints) - - [Daemon Multi-Port Binding](#daemon-multi-port-binding) - - [Configuring TLS Authentication and Enabling Remote Access](#configuring-tls-authentication-and-enabling-remote-access) - - [devicemapper Storage Driver Configuration](#devicemapper-storage-driver-configuration) - - - -## Installation Methods - -iSulad can be installed by running the **yum** or **rpm** command. The **yum** command is recommended because dependencies can be installed automatically. - -This section describes two installation methods. - -- \(Recommended\) Run the following command to install iSulad: - - ``` - $ sudo yum install -y iSulad - ``` - - -- If the **rpm** command is used to install iSulad, you need to download and manually install the RPM packages of iSulad and all its dependencies. 
To install the RPM package of a single iSulad \(the same for installing dependency packages\), run the following command: - - ``` - $ sudo rpm -ihv iSulad-xx.xx.xx-YYYYmmdd.HHMMSS.gitxxxxxxxx.aarch64.rpm - ``` - - -## Deployment Configuration - -### Configuration Mode - -The iSulad server daemon **isulad** can be configured with a configuration file or by running the **isulad --xxx** command. The priority in descending order is as follows: CLI \> configuration file \> default configuration in code. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->If systemd is used to manage the iSulad process, modify the **OPTIONS** field in the **/etc/sysconfig/iSulad** file, which functions the same as using the CLI. - -- **CLI** - - During service startup, configure iSulad using the CLI. To view the configuration options, run the following command: - - ``` - $ isulad --help - lightweight container runtime daemon - - Usage: isulad [global options] - - GLOBAL OPTIONS: - - --authorization-plugin Use authorization plugin - --cgroup-parent Set parent cgroup for all containers - --cni-bin-dir The full path of the directory in which to search for CNI plugin binaries. Default: /opt/cni/bin - --cni-conf-dir The full path of the directory in which to search for CNI config files. 
Default: /etc/cni/net.d - --default-ulimit Default ulimits for containers (default []) - -e, --engine Select backend engine - -g, --graph Root directory of the iSulad runtime - -G, --group Group for the unix socket(default is isulad) - --help Show help - --hook-spec Default hook spec file applied to all containers - -H, --host The socket name used to create gRPC server - --image-layer-check Check layer intergrity when needed - --image-opt-timeout Max timeout(default 5m) for image operation - --insecure-registry Disable TLS verification for the given registry - --insecure-skip-verify-enforce Force to skip the insecure verify(default false) - --log-driver Set daemon log driver, such as: file - -l, --log-level Set log level, the levels can be: FATAL ALERT CRIT ERROR WARN NOTICE INFO DEBUG TRACE - --log-opt Set daemon log driver options, such as: log-path=/tmp/logs/ to set directory where to store daemon logs - --native.umask Default file mode creation mask (umask) for containers - --network-plugin Set network plugin, default is null, suppport null and cni - -p, --pidfile Save pid into this file - --pod-sandbox-image The image whose network/ipc namespaces containers in each pod will use. 
(default "rnd-dockerhub.huawei.com/library/pause-${machine}:3.0") - --registry-mirrors Registry to be prepended when pulling unqualified images, can be specified multiple times - --start-timeout timeout duration for waiting on a container to start before it is killed - -S, --state Root directory for execution state files - --storage-driver Storage driver to use(default overlay2) - -s, --storage-opt Storage driver options - --tls Use TLS; implied by --tlsverify - --tlscacert Trust certs signed only by this CA (default "/root/.iSulad/ca.pem") - --tlscert Path to TLS certificate file (default "/root/.iSulad/cert.pem") - --tlskey Path to TLS key file (default "/root/.iSulad/key.pem") - --tlsverify Use TLS and verify the remote - --use-decrypted-key Use decrypted private key by default(default true) - -V, --version Print the version - --websocket-server-listening-port CRI websocket streaming service listening port (default 10350) - ``` - - Example: Start iSulad and change the log level to DEBUG. - - ``` - $ isulad -l DEBUG - ``` - - -- **Configuration file** - - The iSulad configuration file is **/etc/isulad/daemon.json**. The parameters in the file are described as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Example

-

Description

-

Remarks

-

-e, --engine

-

"engine": "lcr"

-

iSulad runtime, which is lcr by default.

-

None

-

-G, --group

-

"group": "isulad"

-

Socket group.

-

None

-

--hook-spec

-

"hook-spec": "/etc/default/isulad/hooks/default.json"

-

Default hook configuration file for all containers.

-

None

-

-H, --host

-

"hosts": "unix:///var/run/isulad.sock"

-

Communication mode.

-

In addition to the local socket, the tcp://ip:port mode is supported. The port number ranges from 0 to 65535, excluding occupied ports.

-

--log-driver

-

"log-driver": "file"

-

Log driver configuration.

-

None

-

-l, --log-level

-

"log-level": "ERROR"

-

Log output level.

-

None

-

--log-opt

-

"log-opts": {

-

"log-file-mode": "0600",

-

"log-path": "/var/lib/isulad",

-

"max-file": "1",

-

"max-size": "30KB"

-

}

-

Log-related configuration.

-

You can specify max-file, max-size, and log-path. max-file indicates the number of log files. max-size indicates the threshold for triggering log anti-explosion. If max-file is 1, max-size is invalid. log-path specifies the path for storing log files. The log-file-mode command is used to set the permissions to read and write log files. The value must be in octal format, for example, 0666.

-

--start-timeout

-

"start-timeout": "2m"

-

Time required for starting a container.

-

None

-

--runtime

-

"default-runtime": "lcr"

-

Container runtime, which is lcr by default.

-

If neither the CLI nor the configuration file specifies the runtime, lcr is used by default. The priorities of the three specifying methods are as follows: CLI > configuration file > default value lcr. Currently, lcr and kata-runtime are supported.

-

None

-
"runtimes":  {
-        "kata-runtime": {
-          "path": "/usr/bin/kata-runtime",
-          "runtime-args": [
-            "--kata-config",
-            "/usr/share/defaults/kata-containers/configuration.toml"
-          ]
-        }
-    }
-

When starting a container, set this parameter to specify multiple runtimes. Runtimes in this set are valid for container startup.

-

Runtime whitelist of a container. The customized runtimes in this set are valid. kata-runtime is used as the example.

-

-p, --pidfile

-

"pidfile": "/var/run/isulad.pid"

-

File for storing PIDs.

-

This parameter is required only when more than two container engines need to be started.

-

-g, --graph

-

"graph": "/var/lib/isulad"

-

Root directory for iSulad runtimes.

-

-S, --state

-

"state": "/var/run/isulad"

-

Root directory of the execution file.

-

--storage-driver

-

"storage-driver": "overlay2"

-

Image storage driver, which is overlay2 by default.

-

Only overlay2 is supported.

-

-s, --storage-opt

-

"storage-opts": [ "overlay2.override_kernel_check=true" ]

-

Image storage driver configuration options.

-

The options are as follows:

-
overlay2.override_kernel_check=true #Ignore the kernel version check.
-    overlay2.size=${size} #Set the rootfs quota to ${size}.
-    overlay2.basesize=${size} #It is equivalent to overlay2.size.
-

--image-opt-timeout

-

"image-opt-timeout": "5m"

-

Image operation timeout interval, which is 5m by default.

-

The value -1 indicates that the timeout interval is not limited.

-

--registry-mirrors

-

"registry-mirrors": [ "docker.io" ]

-

Registry address.

-

None

-

--insecure-registry

-

"insecure-registries": [ ]

-

Registry without TLS verification.

-

None

-

--native.umask

-

"native.umask": "secure"

-

Container umask policy. The default value is secure. The value normal indicates insecure configuration.

-

Set the container umask value.

-

The value can be null (0027 by default), normal, or secure.

-
normal #The umask value of the started container is 0022.
-    secure #The umask value of the started container is 0027 (default value).
-

--pod-sandbox-image

-

"pod-sandbox-image": "rnd-dockerhub.huawei.com/library/pause-aarch64:3.0"

-

Image used by the pod by default. The default value is rnd-dockerhub.huawei.com/library/pause-${machine}:3.0.

-

None

-

--network-plugin

-

"network-plugin": ""

-

Specifies a network plug-in. The value is an empty string by default, indicating that no network configuration is available and the created sandbox has only the loopback NIC.

-

CNI and the empty string are supported. Other invalid values will cause an iSulad startup failure.

-

--cni-bin-dir

-

"cni-bin-dir": ""

-

Specifies the storage location of the binary file on which the CNI plug-in depends.

-

The default value is /opt/cni/bin.

-

--cni-conf-dir

-

"cni-conf-dir": ""

-

Specifies the storage location of the CNI network configuration file.

-

The default value is /etc/cni/net.d.

-

--image-layer-check=false

-

"image-layer-check": false

-

Image layer integrity check. To enable the function, set it to true; otherwise, set it to false. It is disabled by default.

-

When iSulad is started, the image layer integrity is checked. If the image layer is damaged, the related images are unavailable. iSulad cannot verify empty files, directories, and link files. Therefore, if the preceding files are lost due to a power failure, the integrity check of iSulad image data may fail to be identified. When the iSulad version changes, check whether the parameter is supported. If not, delete it from the configuration file.

-

--insecure-skip-verify-enforce=false

-

"insecure-skip-verify-enforce": false

-

Indicates whether to forcibly skip the verification of the certificate host name/domain name. The value is of the Boolean type, and the default value is false. If this parameter is set to true, the verification of the certificate host name/domain name is skipped.

-

The default value is false (not skipped). Note: Restricted by the YAJL JSON parsing library, if a non-Boolean value that meets the JSON format requirements is configured in the /etc/isulad/daemon.json configuration file, the default value used by iSulad is false.

-

--use-decrypted-key=true

-

"use-decrypted-key": true

-

Specifies whether to use an unencrypted private key. The value is of the Boolean type. If this parameter is set to true, an unencrypted private key is used. If this parameter is set to false, the encrypted private key is used, that is, two-way authentication is required.

-

The default value is true, indicating that an unencrypted private key is used. Note: Restricted by the YAJL JSON parsing library, if a non-Boolean value that meets the JSON format requirements is configured in the /etc/isulad/daemon.json configuration file, the default value used by iSulad is true.

-

--tls

-

"tls":false

-

Specifies whether to use TLS. The value is of the Boolean type.

-

This parameter is used only in -H tcp://IP:PORT mode. The default value is false.

-

--tlsverify

-

"tlsverify":false

-

Specifies whether to use TLS and verify remote access. The value is of the Boolean type.

-

This parameter is used only in -H tcp://IP:PORT mode.

-

--tlscacert

-

--tlscert

-

--tlskey

-

"tls-config": {

-

"CAFile": "/root/.iSulad/ca.pem",

-

"CertFile": "/root/.iSulad/server-cert.pem",

-

"KeyFile":"/root/.iSulad/server-key.pem"

-

}

-

TLS certificate-related configuration.

-

This parameter is used only in -H tcp://IP:PORT mode.

-

--authorization-plugin

-

"authorization-plugin": "authz-broker"

-

User permission authentication plugin.

-

Only authz-broker is supported.

-

--cgroup-parent

-

"cgroup-parent": "lxc/mycgroup"

-

Default cgroup parent path of a container, which is of the string type.

-

Specifies the cgroup parent path of a container. If --cgroup-parent is specified on the client, the client parameter prevails.

-

Note: If container A is started before container B, the cgroup parent path of container B is specified as the cgroup path of container A. When deleting a container, you need to delete container B and then container A in sequence. Otherwise, residual cgroup resources exist.

-

--default-ulimits

-

"default-ulimits": {

-

"nofile": {

-

"Name": "nofile",

-

"Hard": 6400,

-

"Soft": 3200

-

}

-

}

-

Specifies the ulimit restriction type, soft value, and hard value.

-

Specifies the restricted resource type, for example, nofile. The two field names must be the same, that is, nofile. Otherwise, an error is reported. The value of Hard must be greater than or equal to that of Soft. If the Hard or Soft field is not set, the default value 0 is used.

-

--websocket-server-listening-port

-

"websocket-server-listening-port": 10350

-

Specifies the listening port of the CRI WebSocket streaming service. The default port number is 10350.

-

Specifies the listening port of the CRI websocket streaming service.

-

If the client specifies --websocket-server-listening-port, the specified value is used. The port number ranges from 1024 to 49151.

-
- - Example: - - ``` - $ cat /etc/isulad/daemon.json - { - "group": "isulad", - "default-runtime": "lcr", - "graph": "/var/lib/isulad", - "state": "/var/run/isulad", - "engine": "lcr", - "log-level": "ERROR", - "pidfile": "/var/run/isulad.pid", - "log-opts": { - "log-file-mode": "0600", - "log-path": "/var/lib/isulad", - "max-file": "1", - "max-size": "30KB" - }, - "log-driver": "stdout", - "hook-spec": "/etc/default/isulad/hooks/default.json", - "start-timeout": "2m", - "storage-driver": "overlay2", - "storage-opts": [ - "overlay2.override_kernel_check=true" - ], - "registry-mirrors": [ - "docker.io" - ], - "insecure-registries": [ - "rnd-dockerhub.huawei.com" - ], - "pod-sandbox-image": "", - "image-opt-timeout": "5m", - "native.umask": "secure", - "network-plugin": "", - "cni-bin-dir": "", - "cni-conf-dir": "", - "image-layer-check": false, - "use-decrypted-key": true, - "insecure-skip-verify-enforce": false - } - ``` - - >![](./public_sys-resources/icon-notice.gif) **NOTICE:** - >The default configuration file **/etc/isulad/daemon.json** is for reference only. Configure it based on site requirements. - - -### Storage Description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

File

-

Directory

-

Description

-

\*

-

/etc/default/isulad/

-

Stores the OCI configuration file and hook template file of iSulad. The file configuration permission is set to 0640, and the sysmonitor check permission is set to 0550.

-

\*

-

/etc/isulad/

-

Default configuration files of iSulad and seccomp.

-

isulad.sock

-

/var/run/

-

Pipe communication file, which is used for the communication between the client and iSulad.

-

isulad.pid

-

/var/run/

-

File for storing the iSulad PIDs. It is also a file lock to prevent multiple iSulad instances from being started.

-

\*

-

/run/lxc/

-

Lock file, which is created during iSulad running.

-

\*

-

/var/run/isulad/

-

Real-time communication cache file, which is created during iSulad running.

-

\*

-

/var/run/isula/

-

Real-time communication cache file, which is created during iSulad running.

-

\*

-

/var/lib/lcr/

-

Temporary directory of the LCR component.

-

\*

-

/var/lib/isulad/

-

Root directory where iSulad runs, which stores the created container configuration, default log path, database file, and mount point.

-

/var/lib/isulad/mnt/: mount point of the container rootfs.

-

/var/lib/isulad/engines/lcr/: directory for storing LCR container configurations. Each container has a directory named after the container.

-
- -### Constraints - -- In high concurrency scenarios \(200 containers are concurrently started\), the memory management mechanism of Glibc may cause memory holes and large virtual memory \(for example, 10 GB\). This problem is caused by the restriction of the Glibc memory management mechanism in the high concurrency scenario, but not by memory leakage. Therefore, the memory consumption does not increase infinitely. You can set **MALLOC\_ARENA\_MAX** to reduce the virtual memory size and increase the rate of reducing physical memory. However, this environment variable will cause the iSulad concurrency performance to deteriorate. Set this environment variable based on the site requirements. - - ``` - To balance performance and memory usage, set MALLOC_ARENA_MAX to 4. (The iSulad performance on the ARM64 server is affected by less than 10%.) - - Configuration method: - 1. To manually start iSulad, run the export MALLOC_ARENA_MAX=4 command and then start iSulad. - 2. If systemd manages iSulad, you can modify the /etc/sysconfig/iSulad file by adding MALLOC_ARENA_MAX=4. - ``` - -- Precautions for specifying the daemon running directories - - Take **--root** as an example. When **/new/path/** is used as the daemon new root directory, if a file exists in **/new/path/** and the directory or file name conflicts with that required by iSulad \(for example, **engines** and **mnt**\), iSulad may update the original directory or file attributes including the owner and permission. - - Therefore, please note the impact of re-specifying various running directories and files on their attributes. You are advised to specify a new directory or file for iSulad to avoid file attribute changes and security issues caused by conflicts. - -- Log file management: - - >![](./public_sys-resources/icon-notice.gif) **NOTICE:** - >Log function interconnection: logs are managed by systemd as iSulad is and then transmitted to rsyslogd. By default, rsyslog restricts the log writing speed. 
You can add the configuration item **$imjournalRatelimitInterval 0** to the **/etc/rsyslog.conf** file and restart the rsyslogd service. - -- Restrictions on command line parameter parsing - - When the iSulad command line interface is used, the parameter parsing mode is slightly different from that of Docker. For flags with parameters in the command line, regardless of whether a long or short flag is used, only the first space after the flag or the character string after the equal sign \(=\) directly connected to the flag is used as the flag parameter. The details are as follows: - - 1. When a short flag is used, each character in the character string connected to the hyphen \(-\) is considered as a short flag. If there is an equal sign \(=\), the character string following the equal sign \(=\) is considered as the parameter of the short flag before the equal sign \(=\). - - **isula run -du=root busybox** is equivalent to **isula run -du root busybox**, **isula run -d -u=root busybox**, or **isula run -d -u root busybox**. When **isula run -du:root** is used, as **-:** is not a valid short flag, an error is reported. The preceding command is equivalent to **isula run -ud root busybox**. However, this method is not recommended because it may cause semantic problems. - - 1. When a long flag is used, the character string connected to **--** is regarded as a long flag. If the character string contains an equal sign \(=\), the character string before the equal sign \(=\) is a long flag, and the character string after the equal sign \(=\) is a parameter. - - ``` - isula run --user=root busybox - ``` - - or - - ``` - isula run --user root busybox - ``` - - -- After an iSulad container is started, you cannot run the **isula run -i/-t/-ti** and **isula attach/exec** commands as a non-root user. -- When iSulad connects to an OCI container, only kata-runtime can be used to start the OCI container. 
- -### Daemon Multi-Port Binding - -#### Description - -The daemon can bind multiple UNIX sockets or TCP ports and listen on these ports. The client can interact with the daemon through these ports. - -#### Port - -Users can configure one or more ports in the hosts field in the **/etc/isulad/daemon.json** file, or choose not to specify hosts. - -``` -{ - "hosts": [ - "unix:///var/run/isulad.sock", - "tcp://localhost:5678", - "tcp://127.0.0.1:6789" - ] -} -``` - -Users can also run the **-H** or **--host** command in the **/etc/sysconfig/iSulad** file to configure a port, or choose not to specify hosts. - -``` -OPTIONS='-H unix:///var/run/isulad.sock --host tcp://127.0.0.1:6789' -``` - -If hosts are not specified in the **daemon.json** file and iSulad, the daemon listens on **unix:///var/run/isulad.sock** by default after startup. - -#### Restrictions - -- Users cannot specify hosts in the **/etc/isulad/daemon.json** and **/etc/sysconfig/iSuald** files at the same time. Otherwise, an error will occur and iSulad cannot be started. - - ``` - unable to configure the isulad with file /etc/isulad/daemon.json: the following directives are specified both as a flag and in the configuration file: hosts: (from flag: [unix:///var/run/isulad.sock tcp://127.0.0.1:6789], from file: [unix:///var/run/isulad.sock tcp://localhost:5678 tcp://127.0.0.1:6789]) - ``` - -- If the specified host is a UNIX socket, the socket must start with **unix://** followed by a valid absolute path. -- If the specified host is a TCP port, the TCP port number must start with **tcp://** followed by a valid IP address and port number. The IP address can be that of the local host. -- A maximum of 10 valid ports can be specified. If more than 10 ports are specified, an error will occur and iSulad cannot be started. - -### Configuring TLS Authentication and Enabling Remote Access - -#### Description - -iSulad is designed in C/S mode. 
By default, the iSulad daemon process listens only on the local /var/run/isulad.sock. Therefore, you can run commands to operate containers only on the local client iSula. To enable iSula's remote access to the container, the iSulad daemon process needs to listen on the remote access port using TCP/IP. However, listening is performed only by simply configuring tcp ip:port. In this case, all IP addresses can communicate with iSulad by calling **isula -H tcp://**_remote server IP address_**:port**, which may cause security problems. Therefore, it is recommended that a more secure version, namely Transport Layer Security \(TLS\), be used for remote access. - -#### Generating TLS Certificate - -- Example of generating a plaintext private key and certificate - - ``` - #!/bin/bash - set -e - echo -n "Enter pass phrase:" - read password - echo -n "Enter public network ip:" - read publicip - echo -n "Enter host:" - read HOST - - echo " => Using hostname: $publicip, You MUST connect to iSulad using this host!" 
- - mkdir -p $HOME/.iSulad - cd $HOME/.iSulad - rm -rf $HOME/.iSulad/* - - echo " => Generating CA key" - openssl genrsa -passout pass:$password -aes256 -out ca-key.pem 4096 - echo " => Generating CA certificate" - openssl req -passin pass:$password -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem -subj "/C=CN/ST=zhejiang/L=hangzhou/O=Huawei/OU=iSulad/CN=iSulad@huawei.com" - echo " => Generating server key" - openssl genrsa -passout pass:$password -out server-key.pem 4096 - echo " => Generating server CSR" - openssl req -passin pass:$password -subj /CN=$HOST -sha256 -new -key server-key.pem -out server.csr - echo subjectAltName = DNS:$HOST,IP:$publicip,IP:127.0.0.1 >> extfile.cnf - echo extendedKeyUsage = serverAuth >> extfile.cnf - echo " => Signing server CSR with CA" - openssl x509 -req -passin pass:$password -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile extfile.cnf - echo " => Generating client key" - openssl genrsa -passout pass:$password -out key.pem 4096 - echo " => Generating client CSR" - openssl req -passin pass:$password -subj '/CN=client' -new -key key.pem -out client.csr - echo " => Creating extended key usage" - echo extendedKeyUsage = clientAuth > extfile-client.cnf - echo " => Signing client CSR with CA" - openssl x509 -req -passin pass:$password -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile extfile-client.cnf - rm -v client.csr server.csr extfile.cnf extfile-client.cnf - chmod -v 0400 ca-key.pem key.pem server-key.pem - chmod -v 0444 ca.pem server-cert.pem cert.pem - ``` - - -- Example of generating an encrypted private key and certificate request file - - ``` - #!/bin/bash - - echo -n "Enter public network ip:" - read publicip - echo -n "Enter pass phrase:" - read password - - # remove certificates from previous execution. 
- rm -f *.pem *.srl *.csr *.cnf - - - # generate CA private and public keys - echo 01 > ca.srl - openssl genrsa -aes256 -out ca-key.pem -passout pass:$password 2048 - openssl req -subj '/C=CN/ST=zhejiang/L=hangzhou/O=Huawei/OU=iSulad/CN=iSulad@huawei.com' -new -x509 -days 365 -passin pass:$password -key ca-key.pem -out ca.pem - - # create a server key and certificate signing request (CSR) - openssl genrsa -aes256 -out server-key.pem -passout pass:$password 2048 - openssl req -new -key server-key.pem -out server.csr -passin pass:$password -subj '/CN=iSulad' - - echo subjectAltName = DNS:iSulad,IP:${publicip},IP:127.0.0.1 > extfile.cnf - echo extendedKeyUsage = serverAuth >> extfile.cnf - # sign the server key with our CA - openssl x509 -req -days 365 -passin pass:$password -in server.csr -CA ca.pem -CAkey ca-key.pem -out server-cert.pem -extfile extfile.cnf - - # create a client key and certificate signing request (CSR) - openssl genrsa -aes256 -out key.pem -passout pass:$password 2048 - openssl req -subj '/CN=client' -new -key key.pem -out client.csr -passin pass:$password - - # create an extensions config file and sign - echo extendedKeyUsage = clientAuth > extfile.cnf - openssl x509 -req -days 365 -passin pass:$password -in client.csr -CA ca.pem -CAkey ca-key.pem -out cert.pem -extfile extfile.cnf - - # remove the passphrase from the client and server key - openssl rsa -in server-key.pem -out server-key.pem -passin pass:$password - openssl rsa -in key.pem -out key.pem -passin pass:$password - - # remove generated files that are no longer required - rm -f ca-key.pem ca.srl client.csr extfile.cnf server.csr - ``` - - -#### APIs - -``` -{ - "tls": true, - "tls-verify": true, - "tls-config": { - "CAFile": "/root/.iSulad/ca.pem", - "CertFile": "/root/.iSulad/server-cert.pem", - "KeyFile":"/root/.iSulad/server-key.pem" - } -} -``` - -#### Restrictions - -The server supports the following modes: - -- Mode 1 \(client verified\): tlsverify, tlscacert, tlscert, tlskey -- 
Mode 2 \(client not verified\): tls, tlscert, tlskey - -The client supports the following modes: - -- Mode 1 \(verify the identity based on the client certificate, and verify the server based on the specified CA\): tlsverify, tlscacert, tlscert, tlskey -- Mode 2 \(server verified\): tlsverify, tlscacert - -Mode 1 is used for the server, and mode 2 for the client if the two-way authentication mode is used for communication. - -Mode 2 is used for the server and the client if the unidirectional authentication mode is used for communication. - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->- If RPM is used for installation, the server configuration can be modified in the **/etc/isulad/daemon.json** and **/etc/sysconfig/iSulad** files. ->- Two-way authentication is recommended as it is more secure than non-authentication or unidirectional authentication. ->- GRPC open-source component logs are not taken over by iSulad. To view gRPC logs, set the environment variables **gRPC\_VERBOSITY** and **gRPC\_TRACE** as required. ->   - -#### Example - -On the server: - -``` - isulad -H=tcp://0.0.0.0:2376 --tlsverify --tlscacert ~/.iSulad/ca.pem --tlscert ~/.iSulad/server-cert.pem --tlskey ~/.iSulad/server-key.pem -``` - -On the client: - -``` - isula version -H=tcp://$HOSTIP:2376 --tlsverify --tlscacert ~/.iSulad/ca.pem --tlscert ~/.iSulad/cert.pem --tlskey ~/.iSulad/key.pem -``` - -### devicemapper Storage Driver Configuration - -To use the devicemapper storage driver, you need to configure a thinpool device which requires an independent block device with sufficient free space. Take the independent block device **/dev/xvdf** as an example. The configuration method is as follows: - -1. Configuring a thinpool - -1. Stop the iSulad service. - - ``` - # systemctl stop isulad - ``` - -2. Create a logical volume manager \(LVM\) volume based on the block device. - - ``` - # pvcreate /dev/xvdf - ``` - -3. Create a volume group based on the created physical volume. 
- - ``` - # vgcreate isula /dev/xvdf - Volume group "isula" successfully created: - ``` - -4. Create two logical volumes named **thinpool** and **thinpoolmeta**. - - ``` - # lvcreate --wipesignatures y -n thinpool isula -l 95%VG - Logical volume "thinpool" created. - ``` - - ``` - # lvcreate --wipesignatures y -n thinpoolmeta isula -l 1%VG - Logical volume "thinpoolmeta" created. - ``` - -5. Convert the two logical volumes into a thinpool and the metadata used by the thinpool. - - ``` - # lvconvert -y --zero n -c 512K --thinpool isula/thinpool --poolmetadata isula/thinpoolmeta - - WARNING: Converting logical volume isula/thinpool and isula/thinpoolmeta to - thin pool's data and metadata volumes with metadata wiping. - THIS WILL DESTROY CONTENT OF LOGICAL VOLUME (filesystem etc.) - Converted isula/thinpool to thin pool. - ``` - - -   - -2. Modifying the iSulad configuration files - -1. If iSulad has been used in the environment, back up the running data first. - - ``` - # mkdir /var/lib/isulad.bk - # mv /var/lib/isulad/* /var/lib/isulad.bk - ``` - -2. Modify configuration files. - - Two configuration methods are provided. Select one based on site requirements. - - - Edit the **/etc/isulad/daemon.json** file, set **storage-driver** to **devicemapper**, and set parameters related to the **storage-opts** field. For details about related parameters, see [Parameter Description](#en-us_topic_0222861454_section1712923715282). The following lists the configuration reference: - - ``` - { - "storage-driver": "devicemapper" - "storage-opts": [ - "dm.thinpooldev=/dev/mapper/isula-thinpool", - "dm.fs=ext4", - "dm.min_free_space=10%" - ] - } - ``` - - - You can also edit **/etc/sysconfig/iSulad** to explicitly specify related iSulad startup parameters. For details about related parameters, see [Parameter Description](#en-us_topic_0222861454_section1712923715282). 
The following lists the configuration reference: - - ``` - OPTIONS="--storage-driver=devicemapper --storage-opt dm.thinpooldev=/dev/mapper/isula-thinpool --storage-opt dm.fs=ext4 --storage-opt dm.min_free_space=10%" - ``` - -3. Start iSulad for the settings to take effect. - - ``` - # systemctl start isulad - ``` - - -#### Parameter Description - -For details about parameters supported by storage-opts, see [Table 1](#en-us_topic_0222861454_table3191161993812). - -**Table 1** Parameter description - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Mandatory or Not

-

Description

-

dm.fs

-

Yes

-

Specifies the type of the file system used by a container. This parameter must be set to ext4, that is, dm.fs=ext4.

-

dm.basesize

-

No

-

Specifies the maximum storage space of a single container. The unit can be k, m, g, t, or p. An uppercase letter can also be used, for example, dm.basesize=50G. This parameter is valid only during the first initialization.

-

dm.mkfsarg

-

No

-

Specifies the additional mkfs parameters when a basic device is created. For example: dm.mkfsarg=-O ^has_journal

-

dm.mountopt

-

No

-

Specifies additional mount parameters when a container is mounted. For example: dm.mountopt=nodiscard

-

dm.thinpooldev

-

No

-

Specifies the thinpool device used for container or image storage.

-

dm.min_free_space

-

No

-

Specifies minimum percentage of reserved space. For example, dm.min_free_space=10% indicates that storage-related operations such as container creation will fail when the remaining storage space falls below 10%.

-
- -#### Precautions - -- When configuring devicemapper, if the system does not have sufficient space for automatic capacity expansion of thinpool, disable the automatic capacity expansion function. - - To disable automatic capacity expansion, set both **thin\_pool\_autoextend\_threshold** and **thin\_pool\_autoextend\_percent** in the **/etc/lvm/profile/isula-thinpool.profile** file to **100**. - - ``` - activation { - thin_pool_autoextend_threshold=100 - thin_pool_autoextend_percent=100 - } - ``` - -- When devicemapper is used, use Ext4 as the container file system. You need to add **--storage-opt dm.fs=ext4** to the iSulad configuration parameters. -- If graphdriver is devicemapper and the metadata files are damaged and cannot be restored, you need to manually restore the metadata files. Do not directly operate or tamper with metadata of the devicemapper storage driver in Docker daemon. -- When the devicemapper LVM is used, if the devicemapper thinpool is damaged due to abnormal power-off, you cannot ensure the data integrity or whether the damaged thinpool can be restored. Therefore, you need to rebuild the thinpool. - -**Precautions for Switching the devicemapper Storage Pool When the User Namespace Feature Is Enabled on iSula** - -- Generally, the path of the deviceset-metadata file is **/var/lib/isulad/devicemapper/metadata/deviceset-metadata** during container startup. -- If user namespaces are used, the path of the deviceset-metadata file is **/var/lib/isulad/**_userNSUID.GID_**/devicemapper/metadata/deviceset-metadata**. -- When you use the devicemapper storage driver and the container is switched between the user namespace scenario and common scenario, the **BaseDeviceUUID** content in the corresponding deviceset-metadata file needs to be cleared. In the thinpool capacity expansion or rebuild scenario, you also need to clear the **BaseDeviceUUID** content in the deviceset-metadata file. Otherwise, the iSulad service fails to be restarted. 
- diff --git a/docs/en/docs/Container/installation-guideline.md b/docs/en/docs/Container/installation-guideline.md deleted file mode 100644 index 738f8861408c2709f8d3250cd2228a9cc584e4f3..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/installation-guideline.md +++ /dev/null @@ -1,28 +0,0 @@ -# Installation Guideline - -1. Install the container engine iSulad. - - ``` - # yum install iSulad - ``` - -2. Install dependent packages of system containers. - - ``` - # yum install isulad-tools authz isulad-lxcfs-toolkit lxcfs - ``` - -3. Run the following command to check whether iSulad is started: - - ``` - # systemctl status isulad - ``` - -4. Enable the lxcfs and authz services. - - ``` - # systemctl start lxcfs - # systemctl start authz - ``` - - diff --git a/docs/en/docs/Container/installation-upgrade-Uninstallation.md b/docs/en/docs/Container/installation-upgrade-Uninstallation.md deleted file mode 100644 index b217abab2632fcf0219fdd7879bf1132230ef335..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/installation-upgrade-Uninstallation.md +++ /dev/null @@ -1,4 +0,0 @@ -# Installation, Upgrade and Uninstallation - -This chapter describes how to install, configure, upgrade, and uninstall iSulad. 
- diff --git a/docs/en/docs/Container/interconnection-with-the-cni-network.md b/docs/en/docs/Container/interconnection-with-the-cni-network.md deleted file mode 100644 index ea181545190975bf2c889636a7975a51db940254..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/interconnection-with-the-cni-network.md +++ /dev/null @@ -1,127 +0,0 @@ -# Interconnection with the CNI Network - -- [Interconnection with the CNI Network](#interconnection-with-the-cni-network) - - [Overview](#overview-0) - - [Common CNIs](#common-cnis) - - [CNI Network Configuration Description](#cni-network-configuration-description) - - [Adding a Pod to the CNI Network List](#adding-a-pod-to-the-cni-network-list) - - [Removing a Pod from the CNI Network List](#removing-a-pod-from-the-cni-network-list) - - [Usage Restrictions](#usage-restrictions) - - -## Overview - -The container runtime interface \(CRI\) is provided to connect to the CNI network, including parsing the CNI network configuration file and adding or removing a pod to or from the CNI network. When a pod needs to support a network through a container network plug-in such as Canal, the CRI needs to be interconnected to Canal so as to provide the network capability for the pod. - -## Common CNIs - -Common CNIs include CNI network configuration items in the CNI network configuration and pod configuration. These CNIs are visible to users. - -- CNI network configuration items in the CNI network configuration refer to those used to specify the path of the CNI network configuration file, path of the binary file of the CNI network plug-in, and network mode. For details, see [Table 1](#en-us_topic_0183259146_table18221919589). -- CNI network configuration items in the pod configuration refer to those used to set the additional CNI network list to which the pod is added. By default, the pod is added only to the default CNI network plane. You can add the pod to multiple CNI network planes as required. 
- -**Table 1** CNI network configuration items - - - - - - - - - - - - - - - - - - - - - - - - -

Function

-

Command

-

Configuration File

-

Description

-

Path of the binary file of the CNI network plug-in

-

--cni-bin-dir

-

"cni-bin-dir": "",

-

The default value is /opt/cni/bin.

-

Path of the CNI network configuration file

-

--cni-conf-dir

-

"cni-conf-dir": "",

-

The system traverses all files with the extension .conf, .conflist, or .json in the directory. The default value is /etc/cni/net.d.

-

Network mode

-

--network-plugin

-

"network-plugin": "",

-

Specifies a network plug-in. The value is a null character by default, indicating that no network configuration is available and the created sandbox has only the loop NIC. The CNI and null characters are supported. Other invalid values will cause iSulad startup failure.

-
- -Additional CNI network configuration mode: - -Add the network plane configuration item "network.alpha.kubernetes.io/network" to annotations in the pod configuration file. - -The network plane is configured in JSON format, including: - -- **name**: specifies the name of the CNI network plane. -- **interface**: specifies the name of a network interface. - -The following is an example of the CNI network configuration method: - -``` -"annotations" : { - "network.alpha.kubernetes.io/network": "{\"name\": \"mynet\", \"interface\": \"eth1\"}" - } -``` - -   - - -### CNI Network Configuration Description - -The CNI network configuration includes two types, both of which are in the .json file format. - -- Single-network plane configuration file with the file name extension .conf or .json. For details about the configuration items, see [Table 1](#cni-parameters.md#en-us_topic_0184347952_table425023335913) in the appendix. -- Multi-network plane configuration file with the file name extension .conflist. For details about the configuration items, see [Table 3](#cni-parameters.md#en-us_topic_0184347952_table657910563105) in the appendix. - -### Adding a Pod to the CNI Network List - -If **--network-plugin=cni** is configured for iSulad and the default network plane is configured, a pod is automatically added to the default network plane when the pod is started. If the additional network configuration is configured in the pod configuration, the pod is added to these additional network planes when the pod is started. - -**port\_mappings** in the pod configuration is also a network configuration item, which is used to set the port mapping of the pod. To set port mapping, perform the following steps: - -``` -"port_mappings":[ - { - "protocol": 1, - "container_port": 80, - "host_port": 8080 - } -] -``` - -- **protocol**: protocol used for mapping. The value can be **tcp** \(identified by 0\) or **udp** \(identified by 1\). 
-- **container\_port**: port through which the container is mapped. -- **host\_port**: port mapped to the host. - -### Removing a Pod from the CNI Network List - -When StopPodSandbox is called, the interface for removing a pod from the CNI network list will be called to clear network resources. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->1. Before calling the RemovePodSandbox interface, you must call the StopPodSandbox interface at least once. ->2. If StopPodSandbox fails to call the CNI, residual network resources may exist. - -## Usage Restrictions - -- Currently, only CNI 0.3.0 and CNI 0.3.1 are supported. In later versions, CNI 0.1.0 and CNI 0.2.0 may need to be supported. Therefore, when error logs are displayed, the information about CNI 0.1.0 and CNI 0.2.0 is reserved. -- name: The value can contain only lowercase letters, digits, hyphens \(-\), and periods \(.\), and cannot start or end with a hyphen or period. The value can contain a maximum of 200 characters. -- The number of configuration files cannot exceed 200, and the size of a single configuration file cannot exceed 1 MB. -- The extended parameters need to be configured based on the actual network requirements. Optional parameters do not need to be written into the netconf.json file. 
- diff --git a/docs/en/docs/Container/isula-build.md b/docs/en/docs/Container/isula-build.md deleted file mode 100644 index abe20158635ed866c29b81de5b7fd1cffff51982..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/isula-build.md +++ /dev/null @@ -1,836 +0,0 @@ -# Container Image Building - - - - -* [Installation](#installation) - * [Preparations](#preparations) - * [Installing isula-build](#installing-isula-build) -* [Configuring and Managing the isula-build Service](#configuring-and-managing-the-isula-build-service) - * [Configuring the isula-build Service](#configuring-the-isula-build-service) - * [Managing the isula-build Service](#managing-the-isula-build-service) - * [(Recommended) Using systemd for Management](#recommended-using-systemd-for-management) - * [Directly Running isula-builder](#directly-running-isula-builder) -* [Usage Guidelines](#usage-guidelines) - * [Prerequisites](#prerequisites) - * [Overview](#overview) - * [ctr-img: Container Image Management](#ctr-img-container-image-management) - * [build: Container Image Build](#build-container-image-build) - * [image: Viewing Local Persistent Build Images](#image-viewing-local-persistent-build-images) - * [import: Importing a Basic Container Image](#import-importing-a-basic-container-image) - * [load: Importing Cascade Images](#load-importing-cascade-images) - * [rm: Deleting a Local Persistent Image](#rm-deleting-a-local-persistent-image) - * [save: Exporting Cascade Images](#save-exporting-cascade-images) - * [tag: Tagging Local Persistent Images](#tag-tagging-local-persistent-images) - * [info: Viewing the Operating Environment and System Information](#info-viewing-the-operating-environment-and-system-information) - * [login: Logging In to the Remote Image Repository](#login-logging-in-to-the-remote-image-repository) - * [logout: Logging Out of the Remote Image Repository](#logout-logging-out-of-the-remote-image-repository) - * [version: Querying the isula-build 
Version](#version-querying-the-isula-build-version) -* [Directly Integrating a Container Engine](#directly-integrating-a-container-engine) - * [Integration with iSulad](#integration-with-isulad) - * [Integration with Docker](#integration-with-docker) -* [\Appendix](#span-idappendixappendix) - * [Command Line Parameters](#command-line-parameters) - * [Communication Matrix](#communication-matrix) - * [File and Permission](#file-and-permission) - - - - -## Overview - -isula-build is a container image build tool developed by the iSula container team. It allows you to quickly build container images using Dockerfiles. - -The isula-build uses the server/client mode. The isula-build functions as a client and provides a group of command line tools for image build and management. The isula-builder functions as the server, processes client management requests, and functions as the daemon process in the background. - -![isula-build architecure](./figures/isula-build_arch.png) - -Note: - -- Currently, isula-build supports only Docker images. - -## Installation - -### Preparations - -To ensure that isula-build can be successfully installed, the following software and hardware requirements must be met: - -- Supported architectures: x86_64 and AArch64 -- Supported OS: openEuler -- You have the permissions of the root user. - -#### Installing isula-build - -Before using isula-build to build a container image, you need to install the following software packages: - - - -**(Recommended) Method 1: Using YUM** - -1. Configure the openEuler yum source. - -2. Log in to the target server as the root user and install isula-build. - - ``` - sudo yum install -y isula-build - ``` - - - -**Method 2: Using the RPM Package** - -1. Obtain the isula-build-*.rpm installation package from the openEuler yum source, for example, isula-build-0.9.3-1.oe1.x86_64.rpm. - -2. Upload the obtained RPM software package to any directory on the target server, for example, /home/. - -3. 
Log in to the target server as the root user and run the following command to install isula-build: - - ``` - sudo rpm -ivh /home/isula-build-*.rpm - ``` - -> **Note:** -After the installation is complete, you need to manually start the isula-build service. For details about how to start the service, see "Managing the isula-build Service." - -## Configuring and Managing the isula-build Service - -### Configuring the isula-build Service - -After the isula-build software package is installed, the systemd starts the isula-build service based on the default configuration contained in the isula-build software package on the isula-build server. If the default configuration file on the isula-build server cannot meet your requirements, perform the following operations to customize the configuration file: After the default configuration is modified, restart the isula-build server for the new configuration to take effect. For details, see "Managing the isula-build Service." - -Currently, the isula-build server contains the following configuration file: - -- /etc/isula-build/configuration.toml: general isula-builder configuration file, which is used to set the isula-builder log level, persistency directory, runtime directory, and OCI runtime. Parameters in the configuration file are described as follows: - -| Configuration Item | Mandatory or Optional | Description | Value | -| --------- | -------- | --------------------------------- | ----------------------------------------------- | -| debug | Optional | Indicates whether to enable the debug log function. | true: Enable the debug log function. false: Disable the debug log function. | -| loglevel | Optional | Sets the log level. | debug
info
warn
error | -| run_root | Mandatory | Sets the root directory of runtime data. | For example, /var/run/isula-build/ | -| data_root | Mandatory | Sets the local persistency directory. | For example, /var/lib/isula-build/ | -| runtime | Optional | Sets the runtime type. Currently, only runc is supported. | runc | - - -- /etc/isula-build/storage.toml: configuration file for local persistent storage, including the configuration of the storage driver in use. - -| Configuration Item | Mandatory or Optional | Description | -| ------ | -------- | ------------------------------ | -| driver | Optional | Storage driver type. Currently, overlay2 is supported. | - - For more settings, see [containers-storage.conf.5.md](https://github.com/containers/storage/blob/master/docs/containers-storage.conf.5.md). - - -- /etc/isula-build/registries.toml: configuration file for each image repository. - -| Configuration Item | Mandatory or Optional | Description | -| ------------------- | -------- | ------------------------------------------------------------ | -| registries.search | Optional | Search domain of the image repository. Only listed image repositories can be found. | -| registries.insecure | Optional | Accessible insecure image repositories. Listed image repositories cannot pass the authentication and are not recommended. | - - For more settings, see [containers-registries.conf.5.md](https://github.com/containers/image/blob/master/docs/containers-registries.conf.5.md). - -- /etc/isula-build/policy.json: image pull/push policy file. Note: Currently, this parameter cannot be configured. - ->![](./public_sys-resources/icon-note.gif) **Note:** -> -> - isula-build supports the preceding configuration file with the maximum size of 1 MiB. -> - The persistent working directory dataroot cannot be configured on the memory disk, for example, tmpfs. -> - Currently, only overlay2 can be used as the underlying graphdriver. 
- - - -### Managing the isula-build Service - -Currently, openEuler uses systemd to manage the isula-build service. The isula-build software package contains the systemd service file. After installing the isula-build software package, you can use the systemd tool to start or stop the isula-build service. You can also manually start the isula-builder software. Note that only one isula-builder process can be started on a node at a time. - ->![](./public_sys-resources/icon-note.gif) **Note:** -> Only one isula-builder process can be started on a node at a time. - -#### (Recommended) Using systemd for Management - -You can run the following systemd commands to start, stop, and restart the isula-build service: - -- Run the following command to start the isula-build service: - - ```sh - sudo systemctl start isula-build.service - ``` - -- Run the following command to stop the isula-build service: - - ```sh - sudo systemctl stop isula-build.service - ``` - -- Run the following command to restart the isula-builder service: - - ```sh - sudo systemctl restart isula-build.service - ``` - -The systemd service file of the isula-build software installation package is stored in the `/usr/lib/systemd/system/isula-build.service` directory. If you need to modify the systemd configuration of the isula-build service, modify the file and run the following command to make the modification take effect. Then restart the isula-build service based on the systemd management command. - -```sh -sudo systemctl daemon-reload -``` - -#### Directly Running isula-builder - -You can also run the isula-builder command on the server to start the service. The isula-builder command can contain flags for service startup. The following flags are supported: - -- -D, --debug: whether to enable the debugging mode. -- --log-level: log level. The options are debug, info, warn, and error. The default value is info. -- --dataroot: local persistency directory. The default value is /var/lib/isula-build/. 
-- --runroot: runtime directory. The default value is /var/run/isula-build/. -- --storage-driver: underlying storage driver type. -- --storage-opt: underlying storage driver configuration. - ->![](./public_sys-resources/icon-note.gif) **Note:** -> If the command line startup parameters contain the same configuration options as those in the configuration file, the command line parameters are preferentially used for startup. - -Start the isula-build service. For example, to specify the local persistency directory /var/lib/isula-build and disable debugging, run the following command: - -```sh -sudo isula-builder --dataroot "/var/lib/isula-build" --debug=false -``` - -## Usage Guidelines - -### Prerequisites - -isula-build depends on the executable file runc to build the RUN command in the Dockerfile. Therefore, the runc must be pre-installed in the running environment of isula-build. The installation method depends on the application scenario. If you do not need to use the complete docker-engine tool chain, you can install only the docker-runc RPM package. - -```sh -sudo yum install -y docker-runc -``` - -If you need to use a complete docker-engine tool chain, install the docker-engine RPM package, which contains the executable file runc by default. - -```sh -sudo yum install -y docker-engine -``` - ->![](./public_sys-resources/icon-note.gif) **Note:** ->Users must ensure the security of OCI runtime (runc) executable files to prevent malicious replacement. - - - -### Overview - -The isula-build client provides a series of commands for building and managing container images. Currently, the isula-build client provides the following command lines: - -- ctr-img: manages container images. The ctr-img command contains the following subcommands: - - build: builds a container image based on the specified Dockerfile. - - images: lists local container images. - - import: imports a basic container image. - - load: imports a cascade image. - - rm: deletes a local container image. 
- - save: exports a cascade image to a local disk. - - tag: adds a tag to a local container image. -- info: displays the running environment and system information of isula-build. -- login: logs in to the remote container image repository. -- logout: logs out of the remote container image repository. -- version: displays the versions of isula-build and isula-builder. - ->![](./public_sys-resources/icon-note.gif) **Note:** -The isula-build completion and isula-builder completion commands are used to generate the bash command completion script. This command is implicitly provided by the command line framework and is not displayed in the help information. - - - -The following describes how to use these commands in detail. - - - - -### ctr-img: Container Image Management - -The isula-build command groups all container image management commands into the `ctr-img` command. The command is as follows: - -``` -isula-build ctr-img [command] -``` - -#### build: Container Image Build - -The subcommand build of the ctr-img command is used to build container images. The command is as follows: - -``` -isula-build ctr-img build [flags] -``` - -The build command contains the following flags: - -- --build-arg: string list, which contains variables required during the build process. -- --build-static: key value, which is used to build binary equivalence. Currently, the following key values are included: - - build-time: string, which indicates that a fixed timestamp is used to build a container image. The timestamp format is YYYY-MM-DD HH-MM-SS. -- -f, --filename: string, which indicates the path of the Dockerfiles. If this parameter is not specified, the current path is used. -- --iidfile: string, which indicates the ID of the image output to a local file. -- -o, --output: string, which indicates the image export mode and path. -- --proxy: Boolean, which inherits the proxy environment variable on the host. The default value is true. 
-- --tag: string, which indicates the tag value of the image that is successfully built. -- --cap-add: string list, which contains permissions required by the RUN command during the build process. - -** The following describes the flags in detail. ** - -**\--build-arg** - -Parameters in the Dockerfile are inherited from the command lines. The usage is as follows: - -```sh -$ echo "This is bar file" > bar.txt -$ cat Dockerfile_arg -FROM busybox -ARG foo -ADD ${foo}.txt . -RUN cat ${foo}.txt -$ sudo isula-build ctr-img build --build-arg foo=bar -f Dockerfile_arg -STEP 1: FROM busybox -Getting image source signatures -Copying blob sha256:8f52abd3da461b2c0c11fda7a1b53413f1a92320eb96525ddf92c0b5cde781ad -Copying config sha256:e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b -Writing manifest to image destinationStoring signatures -STEP 2: ARG foo -STEP 3: ADD ${foo}.txt . -STEP 4: RUN cat ${foo}.txt -This is bar file -Getting image source signatures -Copying blob sha256:6194458b07fcf01f1483d96cd6c34302ffff7f382bb151a6d023c4e80ba3050a -Copying blob sha256:6bb56e4a46f563b20542171b998cb4556af4745efc9516820eabee7a08b7b869 -Copying config sha256:39b62a3342eed40b41a1bcd9cd455d77466550dfa0f0109af7a708c3e895f9a2 -Writing manifest to image destination -Storing signatures -Build success with image id: 39b62a3342eed40b41a1bcd9cd455d77466550dfa0f0109af7a708c3e895f9a2 -``` - -**\--build-static** - -Specifies a static build. That is, when isula-build is used to build a container image, differences between all timestamps and other build factors (such as the container ID and hostname) are eliminated. Finally, a container image that meets the static requirements is built. - -When isula-build is used to build a container image, assume that a fixed timestamp is given to the build subcommand and the following conditions are met: - -- The build environment is consistent before and after the upgrade. -- The Dockerfile is consistent before and after the build. 
-- The intermediate data generated before and after the build is consistent. -- The build commands are the same. -- The versions of the third-party libraries are the same. - -For container image build, isula-build supports the same Dockerfile. If the build environments are the same, the image content and image ID generated in multiple builds are the same. - -–build-static supports the key-value pair option in the k=v format. Currently, the following options are supported: - -- build-time: string, which indicates the fixed timestamp for creating a static image. The value is in the format of YYYY-MM-DD HH-MM-SS. The timestamp affects the attribute of the file for creating and modifying the time at the diff layer. - - Example: - - ```sh - $ sudo isula-build ctr-img build -f Dockerfile --build-static='build-time=2020-05-23 10:55:33' . - ``` - - In this way, the container images and image IDs built in the same environment for multiple times are the same. - -**\--iidfile** - -Run the following command to output the ID of the built image to a file: - -``` -isula-build ctr-img build --iidfile filename -``` - -For example, to export the container image ID to the testfile file, run the following command: - - ```sh -$ sudo isula-build ctr-img build -f Dockerfile_arg --iidfile testfile - ``` - - Check the container image ID in the testfile file. - - ```sh -$ cat testfile -76cbeed38a8e716e22b68988a76410eaf83327963c3b29ff648296d5cd15ce7b - ``` - - - -**\-o, --output** - -Currently, -o and –output support the following formats: - -- `isulad:image:tag`: directly pushes the image that is successfully built to iSulad, for example, `-o isulad:busybox:latest`. Pay attention to the following restrictions: - - - isula-build and iSulad must be on the same node. - - The tag must be configured. - - On the isula-build client, you need to temporarily save the successfully built image as `/var/tmp/isula-build-tmp-%v.tar` and then import it to iSulad. 
Ensure that the `/var/tmp/` directory has sufficient disk space. - -- `docker-daemon:image:tag`: directly pushes the successfully built image to Docker daemon, for example, `-o docker-daemon:busybox:latest`. Pay attention to the following restrictions: - - isula-build and Docker must be on the same node. - - The tag must be configured. - -- `docker://registry.example.com/repository:tag`: directly pushes the successfully built image to the remote image repository, for example, `-o docker://localhost:5000/library/busybox:latest`. - -- `docker-archive:<path>/<filename>:image:tag`: saves the successfully built image to the local host in Docker image format, for example, `-o docker-archive:/root/image.tar:busybox:latest`. - -In addition to flags, the build subcommand also supports an argument whose type is string and meaning is context, that is, the context of the Dockerfile build environment. The default value of this parameter is the current path where isula-build is executed. This path affects the path retrieved by the ADD and COPY commands of .dockerignore and Dockerfile. - -**\--proxy** - -Specifies whether the container started by the RUN command inherits the proxy-related environment variables http_proxy, https_proxy, ftp_proxy, no_proxy, HTTP_PROXY, HTTPS_PROXY, FTP_PROXY, and NO_PROXY. The default value is true. - -When a user configures proxy-related ARG or ENV in the Dockerfile, the inherited environment variables will be overwritten. - -Note: If the client and daemon are not running on the same terminal, the environment variables that can be inherited are the environment variables of the terminal where the daemon is located. - -**\--tag** - -Specifies the tag of the image stored on the local disk after the image is successfully built. 
- -**\--cap-add** - -Run the following command to add the permission required by the RUN command during the build process: - -``` -isula-build ctr-img build --cap-add ${CAP} -``` - -Example: - -```sh -$ sudo isula-build ctr-img build --cap-add CAP_SYS_ADMIN --cap-add CAP_SYS_PTRACE -f Dockerfile -``` - -> **Note:** -> -> - A maximum of 100 container images can be concurrently built. -> - isula-build supports Dockerfiles with a maximum size of 1 MiB. -> - isula-build supports the .dockerignore file with a maximum size of 1 MiB. -> - Ensure that only the current user has the read and write permissions on the Dockerfiles to prevent other users from tampering with the files. -> - During the build, the RUN command starts the container to build in the container. Currently, isula-build supports the host network only. -> - isula-build only supports the tar.gz compression format. -> - isula-build commits once after each image build stage is complete, instead of each time a Dockerfile line is executed. -> - isula-build does not support cache build. -> - isula-build starts the build container only when the RUN command is built. -> - Currently, the history function of Docker images is not supported. -> - The stage name can start with a digit. -> - The stage name can contain a maximum of 64 characters. -> - isula-build does not support resource restriction on a single Dockerfile build. If resource restriction is required, you can configure a resource limit on the isula-builder. -> - Currently, isula-build does not support a remote URL as the data source of the ADD command in the Dockerfile. - - - -#### image: Viewing Local Persistent Build Images - -You can run the images command to view the images in the local persistent storage. 
- -```sh -$ sudo isula-build ctr-img images ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -REPOSITORY TAG IMAGE ID CREATED SIZE ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -localhost:5000/library/alpine latest a24bb4013296 2020-05-19 19:59:19 5.85 MB -<none> <none> 39b62a3342ee 2020-05-28 18:06:38 1.45 MB ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -``` - -**Note**: The image size displayed by running the `isula-build ctr-img images` command may be different from that displayed by running the `docker images` command. When calculating the image size, isula-build directly calculates the total size of .tar packages at each layer, while Docker calculates the total size of files by decompressing the .tar package and traversing the diff directory. Therefore, the statistics are different. - - - -#### import: Importing a Basic Container Image - -openEuler releases a basic container image, for example, openEuler-docker.x86_64.tar.xz, with the version. You can run the `ctr-img import` command to import the image to isula-build. 
- -The command is as follows: - -``` -isula-build ctr-img import [flags] -``` - -Example: - -```sh -$ sudo isula-build ctr-img import ./openEuler-docker.x86_64.tar.xz openeuler:21.03 -Import success with image id: 7317851cd2ab33263eb293f68efee9d724780251e4e92c0fb76bf5d3c5585e37 -$ sudo isula-build ctr-img images ----------------------------------------------- -------------------- ----------------- ------------------------ ------------ -REPOSITORY TAG IMAGE ID CREATED SIZE ----------------------------------------------- -------------------- ----------------- ------------------------ ------------ -openeuler 21.03 7317851cd2ab 2021-03-15 06:25:34 500 MB ----------------------------------------------- -------------------- ----------------- ------------------------ ------------ -``` - ->![](./public_sys-resources/icon-note.gif) **Note** ->isula-build supports the import of container basic images with a maximum size of 1 GiB. - - - -#### load: Importing Cascade Images - -Cascade images are images that are saved to the local computer by running the docker save or isula-build ctr-img save command. The compressed image package contains a layer-by-layer image package named layer.tar. You can run the ctr-img load command to import the image to isula-build. - -The command is as follows: - -``` -isula-build ctr-img load [flags] -``` - -Currently, the following flags are supported: - -- -i, --input: path of the local .tar package. 
- -Example: - -```sh -$ sudo isula-build ctr-img load -i ubuntu.tarGetting image source signatures -Copying blob sha256:cf612f747e0fbcc1674f88712b7bc1cd8b91cf0be8f9e9771235169f139d507c -Copying blob sha256:f934e33a54a60630267df295a5c232ceb15b2938ebb0476364192b1537449093 -Copying blob sha256:943edb549a8300092a714190dfe633341c0ffb483784c4fdfe884b9019f6a0b4 -Copying blob sha256:e7ebc6e16708285bee3917ae12bf8d172ee0d7684a7830751ab9a1c070e7a125 -Copying blob sha256:bf6751561805be7d07d66f6acb2a33e99cf0cc0a20f5fd5d94a3c7f8ae55c2a1 -Copying blob sha256:c1bd37d01c89de343d68867518b1155cb297d8e03942066ecb44ae8f46b608a3 -Copying blob sha256:a84e57b779297b72428fc7308e63d13b4df99140f78565be92fc9dbe03fc6e69 -Copying blob sha256:14dd68f4c7e23d6a2363c2320747ab88986dfd43ba0489d139eeac3ac75323b2 -Copying blob sha256:a2092d776649ea2301f60265f378a02405539a2a68093b2612792cc65d00d161 -Copying blob sha256:879119e879f682c04d0784c9ae7bc6f421e206b95d20b32ce1cb8a49bfdef202 -Copying blob sha256:e615448af51b848ecec00caeaffd1e30e8bf5cffd464747d159f80e346b7a150 -Copying blob sha256:f610bd1e9ac6aa9326d61713d552eeefef47d2bd49fc16140aa9bf3db38c30a4 -Copying blob sha256:bfe0a1336d031bf5ff3ce381e354be7b2bf310574cc0cd1949ad94dda020cd27 -Copying blob sha256:f0f15db85788c1260c6aa8ad225823f45c89700781c4c793361ac5fa58d204c7 -Copying config sha256:c07ddb44daa97e9e8d2d68316b296cc9343ab5f3d2babc5e6e03b80cd580478e -Writing manifest to image destination -Storing signatures -Loaded image as c07ddb44daa97e9e8d2d68316b296cc9343ab5f3d2babc5e6e03b80cd580478e -``` - ->![](./public_sys-resources/icon-note.gif) **Note:** -> ->The - isula-build load command can only be used to import a compressed image file that contains a single cascade image. -> ->- isula-build allows you to import a container image with a maximum size of 50 GB. - - - -#### rm: Deleting a Local Persistent Image - -You can run the rm command to delete an image from the local persistent storage. 
The command is as follows: - -``` -isula-build ctr-img rm IMAGE [IMAGE...] [FLAGS] -``` - -Currently, the following flags are supported: - -- -a, –all: deletes all images stored locally. -- -p, –prune: deletes all images that are stored locally and do not have tags. - -Example: - -```sh -$ sudo isula-build ctr-img rm -p -Deleted: sha256:78731c1dde25361f539555edaf8f0b24132085b7cab6ecb90de63d72fa00c01d -Deleted: sha256:eeba1bfe9fca569a894d525ed291bdaef389d28a88c288914c1a9db7261ad12c -``` - - - -#### save: Exporting Cascade Images - -You can run the save command to export the cascade images to the local disk. The command is as follows: - -``` -isula-build ctr-img save [REPOSITORY:TAG]|imageID -o xx.tar -``` - -The following example shows how to export an image in `image/tag` format: - -```sh -$ sudo isula-build ctr-img save busybox:latest -o busybox.tar -Getting image source signatures -Copying blob sha256:50644c29ef5a27c9a40c393a73ece2479de78325cae7d762ef3cdc19bf42dd0a -Copying blob sha256:824082a6864774d5527bda0d3c7ebd5ddc349daadf2aa8f5f305b7a2e439806f -Copying blob sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef -Copying config sha256:21c3e96ac411242a0e876af269c0cbe9d071626bdfb7cc79bfa2ddb9f7a82db6 -Writing manifest to image destination -Storing signatures -Save success with image: busybox:latest -``` - -The following example shows how to export an image in `ImageID` format: - -```sh -$ sudo isula-build ctr-img save 21c3e96ac411 -o busybox.tar -Getting image source signatures -Copying blob sha256:50644c29ef5a27c9a40c393a73ece2479de78325cae7d762ef3cdc19bf42dd0a -Copying blob sha256:824082a6864774d5527bda0d3c7ebd5ddc349daadf2aa8f5f305b7a2e439806f -Copying blob sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef -Copying config sha256:21c3e96ac411242a0e876af269c0cbe9d071626bdfb7cc79bfa2ddb9f7a82db6 -Writing manifest to image destination -Storing signatures -Save success with image: 21c3e96ac411 -``` - - - -#### tag: Tagging 
Local Persistent Images - -You can run the tag command to add a tag to a local persistent container image. The command is as follows: - -``` -isula-build ctr-img tag / busybox:latest -``` - -Example: - -```sh -$ sudo isula-build ctr-img images ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -REPOSITORY TAG IMAGE ID CREATED SIZE ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -alpine latest a24bb4013296 2020-05-29 21:19:46 5.85 MB ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -$ sudo isula-build ctr-img tag a24bb4013296 alpine:v1 -$ sudo isula-build ctr-img images ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -REPOSITORY TAG IMAGE ID CREATED SIZE ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -alpine latest a24bb4013296 2020-05-29 21:19:46 5.85 MB -alpine v1 a24bb4013296 2020-05-29 21:19:46 5.85 MB ----------------------------------------------- ----------- ----------------- -------------------------- ------------ -``` - - - -### info: Viewing the Operating Environment and System Information - -You can run the isula-build info command to view the running environment and system information of isula-build. The command is as follows: - -``` - isula-build info [flags] -``` - -The following flags are supported: - -- -H, –human-readable: Boolean. The memory information is printed in the common memory format. The value is 1000 power. 
- -Example: - -```sh -$ sudo isula-build info -H - General: - MemTotal: 7.63 GB - MemFree: 757 MB - SwapTotal: 8.3 GB - SwapFree: 8.25 GB - OCI Runtime: runc - DataRoot: /var/lib/isula-build/ - RunRoot: /var/run/isula-build/ - Builders: 0 - Goroutines: 12 - Store: - Storage Driver: overlay - Backing Filesystem: extfs - Registry: - Search Registries: - oepkgs.net - Insecure Registries: - localhost:5000 - oepkgs.net -``` - -### login: Logging In to the Remote Image Repository - -You can run the login command to log in to the remote image repository. The command is as follows: - -``` - isula-build login SERVER [FLAGS] -``` - -Currently, the following flags are supported: - -``` - Flags: - -p, --password-stdin Read password from stdin - -u, --username string Username to access registry -``` - -Enter the password through stdin. In the following example, the password in creds.txt is transferred to the stdin of isula-build through a pipe for input. - -```sh - $ cat creds.txt | sudo isula-build login -u cooper -p mydockerhub.io - Login Succeeded -``` - -Enter the password in interactive mode. - -```sh - $ sudo isula-build login mydockerhub.io -u cooper - Password: - Login Succeeded -``` - -### logout: Logging Out of the Remote Image Repository - -You can run the logout command to log out of the remote image repository. The command is as follows: - -``` - isula-build logout [SERVER] [FLAGS] -``` - -Currently, the following flags are supported: - -``` - Flags: - -a, --all Logout all registries -``` - -Example: - -```sh - $ sudo isula-build logout -a - Removed authentications -``` - -### version: Querying the isula-build Version - -You can run the version command to view the current version information. 
- -```sh - $ sudo isula-build version - Client: - Version: 0.9.2 - Go Version: go1.13.3 - Git Commit: ccb2a13 - Built: Sat Aug 22 08:06:47 2020 - OS/Arch: linux/amd64 - - Server: - Version: 0.9.2 - Go Version: go1.13.3 - Git Commit: ccb2a13 - Built: Sat Aug 22 08:06:47 2020 - OS/Arch: linux/amd64 -``` - - -## Directly Integrating a Container Engine - -isula-build can be integrated with iSulad or Docker to import the built container image to the local storage of the container engine. - -### Integration with iSulad - -Images that are successfully built can be directly exported to the iSulad. - -Example: - -```sh -$ sudo isula-build ctr-img build -f Dockerfile -o isulad:busybox:2.0 -``` - -Specify iSulad in the -o parameter to export the built container image to iSulad. You can query the image using isula images. - -```sh -$ sudo isula images -isula images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox 2.0 2d414a5cad6d 2020-08-01 06:41:36 5.577 MB -``` - ->![](./public_sys-resources/icon-note.gif) **Note:** -> - It is required that isula-build and iSulad be on the same node. -> - When an image is directly exported to the iSulad, the isula-build client needs to temporarily store the successfully built image as `/var/tmp/isula-build-tmp-%v.tar` and then import it to the iSulad. Ensure that the /var/tmp/ directory has sufficient disk space. If the isula-build client process is killed or Ctrl+C is pressed during the export, you need to manually clear the `/var/tmp/isula-build-tmp-%v.tar` file. - -### Integration with Docker - -Images that are successfully built can be directly exported to the Docker daemon. - -Example: - -```sh -$ sudo isula-build ctr-img build -f Dockerfile -o docker-daemon:busybox:2.0 -``` - -Specify docker-daemon in the -o parameter to export the built container image to Docker. You can run the docker images command to query the image. 
- -```sh -$ sudo docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -busybox 2.0 2d414a5cad6d 2 months ago 5.22MB -``` - ->![](./public_sys-resources/icon-note.gif) **Note:** -> -> - The isula-build and Docker must be on the same node. - -## \Appendix - - -### Command Line Parameters - -**Table 1** Parameters in the ctr-img build command - -| **Command** | **Parameter** | **Description** | -| ------------- | -------------- | ------------------------------------------------------------ | -| ctr-img build | --build-arg | String list, which contains variables required during the build. | -| | --build-static | Key value, which is used to build binary equivalence. Currently, the following key values are included: - build-time: string, which indicates that a fixed timestamp is used to build a container image. The timestamp format is YYYY-MM-DD HH-MM-SS. | -| | -f, --filename | String, which indicates the path of the Dockerfiles. If this parameter is not specified, the current path is used. | -| | --iidfile | String, which indicates the ID of the image output to a local file. | -| | -o, --output | String, which indicates the image export mode and path.| -| | --proxy | Boolean, which inherits the proxy environment variable on the host. The default value is true. | -| | --tag | String, which indicates the tag value of the image that is successfully built. 
| -| | --cap-add | String list, which contains permissions required by the RUN command during the build process.| - -**Table 2** Parameters in the ctr-img load command - -| **Command** | **Parameter** | **Description** | -| ------------ | ----------- | --------------------------------- | -| ctr-img load | -i, --input | String, Path of the local .tar package to be imported| - -**Table 3** Parameters in the ctr-img rm command - -| **Command** | **Parameter** | **Description** | -| ---------- | ----------- | --------------------------------------------- | -| ctr-img rm | -a, --all | Boolean, which is used to delete all local persistent images. | -| | -p, --prune | Boolean, which is used to delete all images that are stored persistently on the local host and do not have tags. | - -**Table 4** Parameters in the ctr-img save command - -| **Command** | **Parameter** | **Description** | -| ------------ | ------------ | ---------------------------------- | -| ctr-img save | -o, --output | String, which indicates the local path for storing the exported images.| - -**Table 5** Parameters in the login command - -| **Command** | **Parameter** | **Description** | -| -------- | -------------------- | ------------------------------------------------------- | -| login | -p, --password-stdin | Boolean, which indicates whether to read the password through stdin. or enter the password in interactive mode. | -| | -u, --username | String, which indicates the username for logging in to the image repository.| - -**Table 6** Parameters in the logout command - -| **Command** | **Parameter** | **Description** | -| -------- | --------- | ------------------------------------ | -| logout | -a, --all | Boolean, which indicates whether to log out of all logged-in image repositories. | - -### Communication Matrix - -The isula-build component processes communicate with each other through the Unix socket file. No port is used for communication. 
- -### File and Permission - -- All isula-build operations must be performed by the root user. - -- The following table lists the file permissions involved in the running of isula-build. - -| **File Path** | **File/Folder Permission** | **Description** | -| ------------------------------------------- | ------------------- | ------------------------------------------------------------ | -| /usr/bin/isula-build | 550 | Binary file of the command line tool. | -| /usr/bin/isula-builder | 550 | Binary file of the isula-builder process on the server. | -| /usr/lib/systemd/system/isula-build.service | 640 | systemd configuration file, which is used to manage the isula-build service. | -| /etc/isula-build/configuration.toml | 600 | General isula-builder configuration file, which sets the isula-builder log level, persistency directory, runtime directory, and OCI runtime. | -| /etc/isula-build/policy.json | 600 | Syntax file of the signature verification policy file. | -| /etc/isula-build/registries.toml | 600 | Configuration file of each image repository, including the available image repository list and image repository blacklist. | -| /etc/isula-build/storage.toml | 600 | Configuration file for local persistent storage, including the configuration of the used storage driver. | -| /var/run/isula_build.sock | 600 | Local socket of isula-builder. | -| /var/lib/isula-build | 700 | Local persistency directory. | -| /var/run/isula-build | 700 | Local runtime directory. | -| /var/tmp/isula-build-tmp-*.tar | 600 | Local directory for temporarily storing the images when they are exported to the iSulad. 
| - - diff --git a/docs/en/docs/Container/isulad-container-engine.md b/docs/en/docs/Container/isulad-container-engine.md deleted file mode 100644 index 54cd5ca2112776a9d584b4eb2e5132607a5dd743..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/isulad-container-engine.md +++ /dev/null @@ -1,12 +0,0 @@ -# iSulad Container Engine - - -Compared with Docker, iSulad is a new container solution with a unified architecture design to meet different requirements in the CT and IT fields. Lightweight containers are implemented using C/C++. They are smart, fast, and not restricted by hardware and architecture. With less noise floor overhead, the containers can be widely used. - -[Figure 1](#en-us_topic_0182207099_fig10763114141217) shows the unified container architecture. - -**Figure 1** Unified container architecture - - -![](./figures/en-us_image_0183048952.png) - diff --git a/docs/en/docs/Container/local-volume-management.md b/docs/en/docs/Container/local-volume-management.md deleted file mode 100644 index df43aed406db776f03cf7e8d620267c2166cd85a..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/local-volume-management.md +++ /dev/null @@ -1,206 +0,0 @@ -#Local Volume Management - - - -- [Local Volume Management](#local-volume-management) - - [Overview](#overview) - - [Precautions](#precautions) - - [Usage](#usage) - - [Using the -v Option to Mount Data](#using-the--v-option-to-mount-data) - - [**Format**](#format) - - [**Functions**](#functions) - - [**Parameter Description**](#parameter-description) - - [**Examples**](#examples) - - [Using the --mount Option to Mount Data](#using-the---mount-option-to-mount-data) - - [**Format**](#format-1) - - [**Functions**](#functions-1) - - [**Parameter Description**](#parameter-description-1) - - [**Examples**](#examples-1) - - [Reusing the Mounting Configuration in Other Containers](#reusing-the-mounting-configuration-in-other-containers) - - [**Format**](#format-2) - - 
[**Functions**](#functions-2) - - [**Parameter Description**](#parameter-description-2) - - [**Examples**](#examples-2) - - [Using the Anonymous Volume in an Image](#using-the-anonymous-volume-in-an-image) - - [Querying a Volume](#querying-a-volume) - - [**Format**](#format-3) - - [**Functions**](#functions-3) - - [**Parameter Description**](#parameter-description-3) - - [**Examples**](#examples-3) - - [Deleting a Volume](#deleting-a-volume) - - [**Format**](#format-4) - - [**Functions**](#functions-4) - - [**Parameter Description**](#parameter-description-4) - - [**Examples**](#examples-4) - - [Precautions](#precautions-1) - - [Conflict Combination Rules](#conflict-combination-rules) - - [Differences Between iSula and Docker](#differences-between-isula-and-docker) - - - -## Overview - -After a container managed by iSula is destroyed, all data in the container is destroyed. If you want to retain data after the container is destroyed, a data persistence mechanism is required. iSula allows files, directories, or volumes on a host to be mounted to a container at runtime. You can write the data to be persisted to the mount point in the container. After the container is destroyed, the files, directories, and volumes on the host are retained. If you need to delete a file, directory, or volume on the host, you can manually delete the file or directory, or run the iSula command to delete the volume. Currently, the iSula supports only local volume management. Local volumes are classified into named volumes and anonymous volumes. A volume whose name is specified by a user is called a named volume. If a user does not specify a name for a volume, iSula automatically generates a name (a 64-bit random number) for the volume, that is, an anonymous volume. - -The following describes how to use iSula to manage local volumes. - -## Precautions - -- The volume name contains 2 to 64 characters and complies with the regular expression ^[a-zA-Z0-9][a-zA-Z0-9_.-]{1,63}$. 
That is, the first character of the volume name must be a letter or digit, and other characters can be letters, digits, underscores (_), periods (.), and hyphens (-). -- During container creation, if data exists at the mount point of the container corresponding to the volume, the data is copied to the volume by default. If the iSula breaks down or restarts or the system is powered off during the copy process, the data in the volume may be incomplete. In this case, you need to manually delete the volume or the data in the volume to ensure that the data is correct and complete. - -## Usage - -### Using the -v Option to Mount Data - -#### **Format** - -```shell -isula run -v [SRC:]DST[:MODE,MODE...] IMAGE -``` - -#### **Functions** - -When you create and run a container, use the -v/--volume option to mount the files, directories, or volumes on the host to the container for data persistence. - -#### **Parameter Description** -- SRC: Path of the file, directory, or volume to be mounted on the host. If the value is an absolute path, a file or folder on the host is mounted. If the value is a volume name, a volume is mounted. If this parameter is not specified, an anonymous volume is mounted. If a folder or volume does not exist, iSula creates a folder or volume and then mounts it. -- DST: Mount path in the container. The value must be an absolute path. -- MODE: When the source to be mounted is a directory or file, the valid parameters are ro, rw, z, Z, private, rprivate, slave, rslave, shared, and rshared. Only one parameter of the same type can be configured. If the source is a volume, the valid parameters are ro, rw, z, Z, and nocopy. Only one parameter of the same type can be configured. Use commas (,) to separate multiple attributes. The parameters are described as follows: - -| Parameter | Description | -| -------- | -----------------------------------------------| -| ro | The mount point in the container is mounted in read-only mode. 
| -| rw | The mount point in the container is mounted in read/write mode. | -| z | If SELinux is enabled, add the SELinux share label during mounting. | -| Z | If SELinux is enabled, add the SELinux private label during mounting. | -| private | The mount point in the container is mounted in private propagation mode. | -| rprivate | The mount point in the container is recursively mounted in private propagation mode. | -| slave | The mount point in the container is mounted in subordinate propagation mode. | -| rslave | The mount point in the container is recursively mounted in subordinate propagation mode. | -| shared | The mount point in the container is mounted in shared propagation mode. | -| rshared | The mount point in the container is recursively mounted in shared propagation mode. | -| nocopy | Data at the mount point is not copied. If this parameter is not set, data is copied by default. In addition, if data already exists in the volume, the data will not be copied. | - - -#### **Examples** -Run the container based on BusyBox, create or mount a volume named vol to the /vol directory of the container, and set the mount point to read-only. In addition, if data exists at the mount point in the container, the data is not copied. -```shell -isula run -v vol:/vol:ro,nocopy busybox -``` - -### Using the --mount Option to Mount Data - -#### **Format** -```shell -isula run --mount [type=TYPE,][src=SRC,]dst=DST[,KEY=VALUE] busybox -``` - -#### **Functions** -When you create and run a container, use the --mount option to mount the files, directories, or volumes on the host to the container for data persistence. - -#### **Parameter Description** -- type: Type of data mounted to the container. The value can be bind, volume, or squashfs. If this parameter is not specified, the default value is volume. -- src: Path of the file, directory, or volume to be mounted on the host. If the value is an absolute path, the file or directory on the host is mounted. 
If the value is a volume name, a volume is mounted. If this parameter is not specified, the volume is an anonymous volume. If a folder or volume does not exist, iSula creates a file or volume and then mounts it. The keyword src is also called source. -- dst: Mount path in the container. The value must be an absolute path. The keyword dst is also called destination or target. -- KEY=VALUE: Parameter of --mount. The values are as follows: - -| KEY | VALUE | -| ------------------------------ | --------------------------------------------------------------------------- | -| selinux-opts/bind-selinux-opts | z or Z. z indicates that if SELinux is enabled, the SELinux share label is added during mounting. Z indicates that if SELinux is enabled, the SELinux private label is added during mounting. -| ro/readonly | 0/false indicates that the mount is read/write. 1/true indicates that the mount is read-only. If this parameter is not specified, the mount is read-only. The parameter is supported only when type is set to bind. | -| volume-nocopy | Data at the mount point is not copied. If this parameter is not specified, data is copied by default. In addition, if data already exists in the volume, the data will not be copied. This parameter is supported only when type is set to volume. | - -#### **Examples** -Run the container based on BusyBox, create or mount a volume named vol to the /vol directory of the container, and set the mount point to read-only. In addition, if data exists at the mount point in the container, the data is not copied. -```shell -isula run --mount type=volume,src=vol,dst=/vol,ro=true,volume-nocopy=true busybox -``` - -### Reusing the Mounting Configuration in Other Containers - -#### **Format** -```shell -isula run --volumes-from CON1[:MODE] busybox -``` - -#### **Functions** -When you create and run a container, use the --volumes-from option to indicate that the mount point configuration includes that of the CON1 container. 
You can set multiple --volumes-from options. - -#### **Parameter Description** -- CON1: Name or ID of the container whose mount point is reused. -- MODE: If the value is ro, the mount point is read-only. If the value is rw, the mount point is read/write. - -#### **Examples** -Assume that a container named container1 has been configured with a volume vol1 to the container directory /vol1, and a container named container2 has been configured with a volume vol2 to the container directory /vol2. Run a new container to reuse the mounting configuration of container1 and container2. That is, volume vol1 is mounted to the /vol1 directory of the container, and volume vol2 is mounted to the /vol2 directory of the container. -```shell -isula run --volumes-from container1 --volumes-from container2 busbyox -``` - -### Using the Anonymous Volume in an Image - -You do not need to perform any configuration to use the anonymous volume in the image. If an anonymous volume is configured in the image, iSula automatically creates an anonymous volume and mounts it to the specified path in the image at container runtime. You can write data to the mount point of an anonymous volume in a container for data persistence. - -### Querying a Volume - -#### **Format** -```shell -isula volume ls [OPTIONS] -``` - -#### **Functions** -This command is used to query all volumes managed by iSula. - -#### **Parameter Description** -Option: -- -q,--quit: If this parameter is not specified, only the volume driver information and volume name are queried by default. If this parameter is specified, only the volume name is queried. - -#### **Examples** -This command is used to query all volumes managed by iSula and return only the volume name. -```shell -isula volume ls -q -``` - -### Deleting a Volume - -#### **Format** -``` -isula volume rm [OPTIONS] VOLUME [VOLUME...] -isula volume prune [OPTIONS] -``` - -#### **Functions** -- rm: deletes a specified volume. 
If the volume is used by a container, the volume fails to be deleted. -- prune: deletes all volumes that are not used by containers. - -#### **Parameter Description** -OPTIONS in the prune command: -- -f,--force: specifies that the system does not display a message asking you whether to delete the volume. By default, a risk message is displayed. You need to enter y to continue the operation. - -#### **Examples** -Delete volumes vol1 and vol2. -```shell -isula volume rm vol1 vol2 -``` -Delete all unused volumes in the following format. No risk message is displayed. -```shell -isula volume prune -f -``` - -### Precautions - -#### Conflict Combination Rules -If a volume mount point conflict occurs, perform the following operations: -- If configurations of -v and --mount conflict, a failure message is returned. -- If the configuration obtained from --volumes-from conflicts with the -v or --mount configuration, the configuration is discarded. -- If the anonymous volume configuration in the image conflicts with the -v, --mount, or --volumes-from configuration, the configuration is discarded. - -#### Differences Between iSula and Docker -| iSula Behavior | Docker Behavior | -| ------------------------------------------- | ------------------------------------------- | -| The volume name can contain a maximum of 64 characters. | The length of the volume name is not limited. | -| If the source to be mounted does not exist, the --mount parameter is created. | If the source to be mounted does not exist, an error is reported. | -| The --mount parameter supports the z or Z parameter configuration in bind-selinux-opts and selinux-opts. | The --mount parameter does not support the parameter configuration in the bind-selinux-opts and selinux-opts. | -| Rules for combining mount point conflicts are not processed. | The anonymous volume specified by -v is processed as the anonymous volume in the image. | -| The volume prune command displays the space that has been reclaimed. 
| The volume prune command does not display the space that has been reclaimed. | -| -v, --mount, and --volumes-from are configured in hostconfig, and the anonymous volume is configured in config. | The anonymous volume specified by -v is configured in config, and other configurations are configured in hostconfig. | diff --git a/docs/en/docs/Container/managing-the-lifecycle-of-a-secure-container.md b/docs/en/docs/Container/managing-the-lifecycle-of-a-secure-container.md deleted file mode 100644 index c16a2ee9a8a087af3d2b3643aba0a162d85f8b2a..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/managing-the-lifecycle-of-a-secure-container.md +++ /dev/null @@ -1,107 +0,0 @@ -# Managing the Lifecycle of a Secure Container - -- [Managing the Lifecycle of a Secure Container](#managing-the-lifecycle-of-a-secure-container) - - [Starting a Secure Container](#starting-a-secure-container) - - [Stopping a Secure Container](#stopping-a-secure-container) - - [Deleting a Secure Container](#deleting-a-secure-container) - - [Running a New Command in the Container](#running-a-new-command-in-the-container) - - - - -## Starting a Secure Container - -You can use the Docker engine or iSulad as the container engine of the secure container. The invoking methods of the two engines are similar. You can select either of them to start a secure container. - -To start a secure container, perform the following steps: - -1. Ensure that the secure container component has been correctly installed and deployed. -2. Prepare the container image. If the container image is busybox, run the following commands to download the container image using the Docker engine or iSulad: - - ``` - docker pull busybox - ``` - - ``` - isula pull busybox - ``` - -3. Start a secure container. 
Run the following commands to start a secure container using the Docker engine and iSulad: - - ``` - docker run -tid --runtime kata-runtime --network none busybox - ``` - - ``` - isula run -tid --runtime kata-runtime --network none busybox - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The secure container supports the CNI network only and does not support the CNM network. The **-p** and **--expose** options cannot be used to expose container ports. When using a secure container, you need to specify the **--net=none** option. - -4. Start a pod. - 1. Start the pause container and obtain the sandbox ID of the pod based on the command output. Run the following commands to start a pause container using the Docker engine and iSulad: - - ``` - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=podsandbox - ``` - - ``` - isula run -tid --runtime kata-runtime --network none --annotation io.kubernetes.cri.container-type=sandbox - ``` - -    - - 1. Create a service container and add it to the pod. Run the following commands to create a service container using the Docker engine and iSulad: - - ``` - docker run -tid --runtime kata-runtime --network none --annotation io.kubernetes.docker.type=container --annotation io.kubernetes.sandbox.id= busybox - ``` - - ``` - isula run -tid --runtime kata-runtime --network none --annotation io.kubernetes.cri.container-type=container --annotation io.kubernetes.cri.sandbox-id= busybox - ``` - - **--annotation** is used to mark the container type, which is provided by the Docker engine and iSulad, but not provided by the open-source Docker engine in the upstream community. - - - -## Stopping a Secure Container - -- Run the following command to stop a secure container: - - ``` - docker stop - ``` - -- Stop a pod. - - When stopping a pod, note that the lifecycle of the pause container is the same as that of the pod. Therefore, stop service containers before the pause container. 
- - -## Deleting a Secure Container - -Ensure that the container has been stopped. - -``` -docker rm -``` - -To forcibly delete a running container, run the **-f** command. - -``` -docker rm -f -``` - -## Running a New Command in the Container - -The pause container functions only as a placeholder container. Therefore, if you start a pod, run a new command in the service container. The pause container does not execute the corresponding command. If only one container is started, run the following command directly: - -``` -docker exec -ti -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->1. If the preceding command has no response because another host runs the **docker restart** or **docker stop** command to access the same container, you can press **Ctrl**+**P**+**Q** to exit the operation. ->2. If the **-d** option is used, the command is executed in the background and no error information is displayed. The exit code cannot be used to determine whether the command is executed correctly. - diff --git a/docs/en/docs/Container/maximum-number-of-handles.md b/docs/en/docs/Container/maximum-number-of-handles.md deleted file mode 100644 index a8cdb1d40bf2a63c78e36d75b8bc8207b02aeff2..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/maximum-number-of-handles.md +++ /dev/null @@ -1,59 +0,0 @@ -# Maximum Number of Handles - -- [Maximum Number of Handles](#maximum-number-of-handles) - - -## Function Description - -System containers support limit on the number of file handles. File handles include common file handles and network sockets. When starting a container, you can specify the **--files-limit** parameter to limit the maximum number of handles opened in the container. - -## Parameter Description - - - - - - - - - - - - -

| **Command** | **Parameter** | **Value Description** |
| ---------------- | ------------- | ------------------------------------------------------------ |
| isula create/run | --files-limit | • The value cannot be negative and must be an integer.<br>• The value 0 indicates that the number is not limited by the parameter. The maximum number is determined by the current kernel files cgroup. |
- -## Constraints - -- If the value of **--files-limit** is too small, the system container may fail to run the **exec** command and the error "open temporary files" is reported. Therefore, you are advised to set the parameter to a large value. -- File handles include common file handles and network sockets. - -## Example - -To use **--files-limit** to limit the number of file handles opened in a container, run the following command to check whether the kernel supports files cgroup: - -``` -[root@localhost ~]# cat /proc/1/cgroup | grep files -10:files:/ -``` - -If **files** is displayed, files cgroup is supported. - -Start the container, specify the **--files-limit** parameter, and check whether the **files.limit** parameter is successfully written. - -``` -[root@localhost ~]# isula run -tid --files-limit 1024 --system-container --external-rootfs /tmp/root-fs empty init 01e82fcf97d4937aa1d96eb8067f9f23e4707b92de152328c3fc0ecb5f64e91d -[root@localhost ~]# isula exec -it 01e82fcf97d4 bash -[root@localhost ~]# cat /sys/fs/cgroup/files/files.limit -1024 - -``` - -The preceding information indicates that the number of file handles is successfully limited in the container. - diff --git a/docs/en/docs/Container/monitoring-secure-containers.md b/docs/en/docs/Container/monitoring-secure-containers.md deleted file mode 100644 index 2c279b4cb4b8618ed449c7dbf4170a9c0cb7424c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/monitoring-secure-containers.md +++ /dev/null @@ -1,147 +0,0 @@ -# Monitoring Secure Containers - -- [Monitoring Secure Containers](#monitoring-secure-containers) - - -## Description - -The **kata events** command is used to view the status information of a specified container. The information includes but is not limited to the container memory, CPU, PID, Blkio, hugepage memory, and network information. - -## Usage - -``` -kata-runtime events [command options] -``` - -## Parameters - -- **-- interval value**: specifies the query period. 
If this parameter is not specified, the default query period is 5 seconds. -- **--stats**: displays container information and exits the query. - -## Prerequisites - -The container to be queried must be in the **running** state. Otherwise, the following error message will be displayed: "Container ID \(\) does not exist". - -This command can be used to query the status of only one container. - -## Example - -- The container status is displayed every three seconds. - - ``` - $ kata-runtime events --interval 3s 5779b2366f47 - { - "data": { - "blkio": {}, - "cpu": { - "throttling": {}, - "usage": { - "kernel": 130000000, - "percpu": [ - 214098440 - ], - "total": 214098440, - "user": 10000000 - } - }, - "hugetlb": {}, - "intel_rdt": {}, - "interfaces": [ - { - "name": "lo", - "rx_bytes": 0, - "rx_dropped": 0, - "rx_errors": 0, - "rx_packets": 0, - "tx_bytes": 0, - "tx_dropped": 0, - "tx_errors": 0, - "tx_packets": 0 - } - ], - "memory": { - "cache": 827392, - "kernel": { - "failcnt": 0, - "limit": 9223372036854771712, - "max": 421888, - "usage": 221184 - }, - "kernelTCP": { - "failcnt": 0, - "limit": 0 - }, - "raw": { - "active_anon": 49152, - "active_file": 40960, - "cache": 827392, - "dirty": 0, - "hierarchical_memory_limit": 9223372036854771712, - "hierarchical_memsw_limit": 9223372036854771712, - "inactive_anon": 0, - "inactive_file": 839680, - "mapped_file": 540672, - "pgfault": 6765, - "pgmajfault": 0, - "pgpgin": 12012, - "pgpgout": 11803, - "rss": 4096, - "rss_huge": 0, - "shmem": 32768, - "swap": 0, - "total_active_anon": 49152, - "total_active_file": 40960, - "total_cache": 827392, - "total_dirty": 0, - "total_inactive_anon": 0, - "total_inactive_file": 839680, - "total_mapped_file": 540672, - "total_pgfault": 6765, - "total_pgmajfault": 0, - "total_pgpgin": 12012, - "total_pgpgout": 11803, - "total_rss": 4096, - "total_rss_huge": 0, - "total_shmem": 32768, - "total_swap": 0, - "total_unevictable": 0, - "total_writeback": 0, - "unevictable": 0, - "writeback": 0 
- }, - "swap": { - "failcnt": 0, - "limit": 9223372036854771712, - "max": 34201600, - "usage": 1204224 - }, - "usage": { - "failcnt": 0, - "limit": 9223372036854771712, - "max": 34201600, - "usage": 1204224 - } - }, - "pids": { - "current": 1 - }, - "tcp": {}, - "tcp6": {}, - "udp": {}, - "udp6": {} - }, - "id": "5779b2366f47cd1468ebb1ba7c52cbdde3c7d3a5f2af3eefadc8356700fc860b", - "type": "stats" - } - ``` - - -- The query exits after the container status is displayed. - - ``` - kata-runtime events --stats - ``` - - The format of the command output is the same as that of the previous command. However, the output of this command is displayed only once. - - diff --git a/docs/en/docs/Container/privileged-container.md b/docs/en/docs/Container/privileged-container.md deleted file mode 100644 index cd19dffa22c7fe8b214beb9b5227c1bb2db47cf6..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/privileged-container.md +++ /dev/null @@ -1,239 +0,0 @@ -# Privileged Container - -- [Privileged Container](#privileged-container.) - - [Scenarios](#scenarios) - - [Usage Restrictions](#usage-restrictions-1) - - [Usage Guide](#usage-guide) - - -## Scenarios - -By default, iSulad starts common containers that are suitable for starting common processes. However, common containers have only the default permissions defined by capabilities in the **/etc/default/isulad/config.json** directory. To perform privileged operations \(such as use devices in the **/sys** directory\), a privileged container is required. By using this feature, user **root** in the container has **root** permissions of the host. Otherwise, user **root** in the container has only common user permissions of the host. - -## Usage Restrictions - -Privileged containers provide all functions for containers and remove all restrictions enforced by the device cgroup controller. A privileged container has the following features: - -- Secomp does not block any system call. 
-- The **/sys** and **/proc** directories are writable. -- All devices on the host can be accessed in the container. - -- All system capabilities will be enabled. - -Default capabilities of a common container are as follows: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Capability Key

-

Description

-

SETPCAP

-

Modifies the process capabilities.

-

MKNOD

-

Allows using the system call mknod() to create special files.

-

AUDIT_WRITE

-

Writes records to kernel auditing logs.

-

CHOWN

-

Modifies UIDs and GIDs of files. For details, see chown(2).

-

NET_RAW

-

Uses RAW and PACKET sockets and binds to any address for transparent proxying.

-

DAC_OVERRIDE

-

Ignores the discretionary access control (DAC) restrictions on files.

-

FOWNER

-

Ignores the restriction that the file owner ID must be the same as the process user ID.

-

FSETID

-

Allows setting setuid bits of files.

-

KILL

-

Allows sending signals to processes that do not belong to itself.

-

SETGID

-

Allows the change of the process group ID.

-

SETUID

-

Allows the change of the process user ID.

-

NET_BIND_SERVICE

-

Allows binding to a port whose number is smaller than 1024.

-

SYS_CHROOT

-

Allows using the system call chroot().

-

SETFCAP

-

Allows transferring capabilities to, and deleting capabilities from, other processes.

-
- -When a privileged container is enabled, the following capabilities are added: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Capability Key

-

Description

-

SYS_MODULE

-

Loads and unloads kernel modules.

-

SYS_RAWIO

-

Allows direct access to /dev/port, /dev/mem, /dev/kmem, and raw block devices.

-

SYS_PACCT

-

Allows enabling or disabling process accounting (BSD-style process audit).

-

SYS_ADMIN

-

Allows executing system management tasks, such as loading or unloading file systems and setting disk quotas.

-

SYS_NICE

-

Allows increasing the priority and setting the priorities of other processes.

-

SYS_RESOURCE

-

Ignores resource restrictions.

-

SYS_TIME

-

Allows changing the system clock.

-

SYS_TTY_CONFIG

-

Allows configuring TTY devices.

-

AUDIT_CONTROL

-

Enables and disables kernel auditing, modifies audit filter rules, and extracts audit status and filtering rules.

-

MAC_ADMIN

-

Overrides the mandatory access control (MAC), which is implemented for the Smack Linux Security Module (LSM).

-

MAC_OVERRIDE

-

Allows MAC configuration or status change, which is implemented for Smack LSM.

-

NET_ADMIN

-

Allows executing network management tasks.

-

SYSLOG

-

Performs the privileged syslog(2) operation.

-

DAC_READ_SEARCH

-

Ignores the DAC access restrictions on file reading and directory search.

-

LINUX_IMMUTABLE

-

Allows modifying the IMMUTABLE and APPEND attributes of a file.

-

NET_BROADCAST

-

Allows network broadcast and multicast access.

-

IPC_LOCK

-

Allows locking shared memory segments.

-

IPC_OWNER

-

Ignores the IPC ownership check.

-

SYS_PTRACE

-

Allows tracing any process.

-

SYS_BOOT

-

Allows restarting the OS.

-

LEASE

-

Allows modifying the FL_LEASE flag of a file lock.

-

WAKE_ALARM

-

Triggers the function of waking up the system, for example, sets the CLOCK_REALTIME_ALARM and CLOCK_BOOTTIME_ALARM timers.

-

BLOCK_SUSPEND

-

Allows blocking system suspension.

-
- -## Usage Guide - -iSulad runs the **--privileged** command to enable the privilege mode for containers. Do not add privileges to containers unless necessary. Comply with the principle of least privilege to reduce security risks. - -``` -isula run --rm -it --privileged busybox -``` - diff --git a/docs/en/docs/Container/public_sys-resources/icon-caution.gif b/docs/en/docs/Container/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Container/public_sys-resources/icon-danger.gif b/docs/en/docs/Container/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Container/public_sys-resources/icon-note.gif b/docs/en/docs/Container/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Container/public_sys-resources/icon-notice.gif b/docs/en/docs/Container/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Container/public_sys-resources/icon-tip.gif b/docs/en/docs/Container/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-tip.gif and /dev/null differ 
diff --git a/docs/en/docs/Container/public_sys-resources/icon-warning.gif b/docs/en/docs/Container/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Container/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Container/querying-information.md b/docs/en/docs/Container/querying-information.md deleted file mode 100644 index 34bc33a21945d246c518b75c690b3b68b74d536c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/querying-information.md +++ /dev/null @@ -1,95 +0,0 @@ -# Querying Information - -- [Querying Information](#querying-information) - - [Querying the Service Version](#querying-the-service-version) - - [Querying System-level Information](#querying-system-level-information) - - -## Querying the Service Version - -### Description - -The **isula version** command is run to query the version of the iSulad service. - -### Usage - -``` -isula version -``` - -### Example - -Query the version information. - -``` -isula version -``` - -If the iSulad service is running properly, you can view the information about versions of the client, server, and **OCI config**. - -``` -Client: - Version: 1.0.31 - Git commit: fa7f9902738e8b3d7f2eb22768b9a1372ddd1199 - Built: 2019-07-30T04:21:48.521198248-04:00 - -Server: - Version: 1.0.31 - Git commit: fa7f9902738e8b3d7f2eb22768b9a1372ddd1199 - Built: 2019-07-30T04:21:48.521198248-04:00 - -OCI config: - Version: 1.0.0-rc5-dev - Default file: /etc/default/isulad/config.json -``` - -If the iSulad service is not running, only the client information is queried and a message is displayed indicating that the connection times out. - -``` -Client: - Version: 1.0.31 - Git commit: fa7f9902738e8b3d7f2eb22768b9a1372ddd1199 - Built: 2019-07-30T04:21:48.521198248-04:00 - -Can not connect with server.Is the iSulad daemon running on the host? 
-``` - -Therefore, the **isula version** command is often used to check whether the iSulad service is running properly. - -## Querying System-level Information - -### Description - -The **isula info** command is run to query the system-level information, number of containers, and number of images. - -### Usage - -``` -isula info -``` - -### Example - -Query system-level information, including the number of containers, number of images, kernel version, and operating system \(OS\). - -``` -$ isula info -Containers: 2 - Running: 0 - Paused: 0 - Stopped: 2 -Images: 8 -Server Version: 1.0.31 -Logging Driver: json-file -Cgroup Driverr: cgroupfs -Hugetlb Pagesize: 2MB -Kernel Version: 4.19 -Operating System: Fedora 29 (Twenty Nine) -OSType: Linux -Architecture: x86_64 -CPUs: 8 -Total Memory: 7 GB -Name: localhost.localdomain -iSulad Root Dir: /var/lib/isulad -``` - diff --git a/docs/en/docs/Container/reboot-or-shutdown-in-a-container.md b/docs/en/docs/Container/reboot-or-shutdown-in-a-container.md deleted file mode 100644 index 84d5c380070ef178a6e525732c4f5cbf21619d0c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/reboot-or-shutdown-in-a-container.md +++ /dev/null @@ -1,82 +0,0 @@ -# Reboot or Shutdown in a Container - -- [Reboot or Shutdown in a Container](#reboot-or-shutdown-in-a-container) - - -## Function Description - -The **reboot** and **shutdown** commands can be executed in a system container. You can run the **reboot** command to restart a container, and run the **shutdown** command to stop a container. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--restart

-
  • Variable of the string type.
  • Supported option is as follows:

    on-reboot: restarts the system container.

    -

      

    -
-
- -## Constraints - -- The shutdown function relies on the actual OS of the container running environment. -- When you run the **shutdown -h now** command to shut down the system, do not open multiple consoles. For example, if you run the **isula run -ti** command to open a console and run the **isula attach** command for the container in another host bash, another console is opened. In this case, the **shutdown** command fails to be executed. - -## Example - -- Specify the **--restart on-reboot** parameter when starting a container. For example: - - ``` - [root@localhost ~]# isula run -tid --restart on-reboot --system-container --external-rootfs /root/myrootfs none init - 106faae22a926e22c828a0f2b63cf5c46e5d5986ea8a5b26de81390d0ed9714f - ``` - - -- In the container, run the **reboot** command. - - ``` - [root@localhost ~]# isula exec -it 10 bash - [root@localhost /]# reboot - ``` - - Check whether the container is restarted. - - ``` - [root@localhost ~]# isula exec -it 10 ps aux - USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND - root 1 0.1 0.0 21588 9504 ? Ss 12:11 0:00 init - root 14 0.1 0.0 27024 9376 ? Ss 12:11 0:00 /usr/lib/system - root 17 0.0 0.0 18700 5876 ? Ss 12:11 0:00 /usr/lib/system - dbus 22 0.0 0.0 9048 3624 ? Ss 12:11 0:00 /usr/bin/dbus-d - root 26 0.0 0.0 8092 3012 ? Rs+ 12:13 0:00 ps aux - ``` - -- In the container, run the **shutdown** command. - - ``` - [root@localhost ~]# isula exec -it 10 bash - [root@localhost /]# shutdown -h now - [root@localhost /]# [root@localhost ~]# - ``` - - Check whether the container is stopped. 
- - ``` - [root@localhost ~]# isula exec -it 10 bash - Error response from daemon: Exec container error;Container is not running:106faae22a926e22c828a0f2b63cf5c46e5d5986ea8a5b26de81390d0ed9714f - ``` - - diff --git a/docs/en/docs/Container/secure-container.md b/docs/en/docs/Container/secure-container.md deleted file mode 100644 index 760d0296ff0c680eeba4c07dc4be7e6683a7d750..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/secure-container.md +++ /dev/null @@ -1,29 +0,0 @@ -# Secure Container - - -## Overview - -The secure container technology is an organic combination of virtualization and container technologies. Compared with a common Linux container, a secure container has better isolation performance. - -Common Linux containers use namespaces to isolate the running environment between processes and use cgroups to limit resources. Essentially, these common Linux containers share the same kernel. Therefore, if a single container affects the kernel intentionally or unintentionally, the containers on the same host will be affected. - -Secure containers are isolated by the virtualization layers. Containers on the same host do not affect each other. - -**Figure 1** Secure container architecture - - -![](./figures/secure-container.png) - -Secure containers are closely related to the concept of pod in Kubernetes. Kubernetes is the open-source ecosystem standard for the container scheduling management platform. It defines a group of container runtime interfaces \(CRIs\). - -In the CRI standards, a pod is a logical grouping of one or more containers, which are scheduled together and share interprocess communication \(IPC\) and network namespaces. As the smallest unit for scheduling, a pod must contain a pause container and one or more service containers. The lifecycle of a pause container is the same as that of the pod. - -A lightweight virtual machine \(VM\) in a secure container is a pod. 
The first container started in the VM is the pause container, and the containers started later are service containers. - -In a secure container, you can start a single container or start a pod. - -[Figure 2](#fig17734185518269) shows the relationship between the secure container and peripheral components. - -**Figure 2** Relationship between the secure container and peripheral components -![](./figures/relationship-between-the-secure-container-and-peripheral-components.png "relationship-between-the-secure-container-and-peripheral-components") - diff --git a/docs/en/docs/Container/security-and-isolation.md b/docs/en/docs/Container/security-and-isolation.md deleted file mode 100644 index ada1aada90a1c37d05acf92751d5ed5c0f461eb8..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/security-and-isolation.md +++ /dev/null @@ -1,344 +0,0 @@ -# Security and Isolation - -- [Security and Isolation](#security-and-isolation) - - [Many-to-Many User Namespaces](#many-to-many-user-namespaces) - - [User Permission Control](#user-permission-control) - - [proc File System Isolation \(Lxcfs\)](#proc-file-system-isolation-(lxcfs)) - - - - -## Many-to-Many User Namespaces - -### Function Description - -User namespaces are used to map user **root** of a container to a common user of the host and allow the processes and user in the container \(that are unprivileged on the host\) to have privilege. This can prevent the processes in the container from escaping to the host and performing unauthorized operations. In addition, after user namespaces are used, the container and host use different UIDs and GIDs. This ensures that user resources in the container such as file descriptors are isolated from those on the host. - -In system containers, you can configure the **--user-remap** API parameter to map user namespaces of different containers to different user namespaces on the host, isolating the user namespaces of containers. 
- -### Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--user-remap

-

The parameter format is uid:gid:offset. The parameter is described as follows:

-
  • uid and gid must be integers greater than or equal to 0.
  • offset must be an integer greater than 0 and less than 65536. The value cannot be too small. Otherwise, the container cannot be started.
  • Either the sum of uid and offset or the sum of gid and offset must be less than or equal to 2^32 - 1. Otherwise, an error is reported during container startup.
-
- -### Constraints - -- If **--user-remap** is specified in a system container, the rootfs directory must be accessible to users specified by _uid_ or _gid_ in **--user-remap**. Otherwise, user namespaces of containers cannot access rootfs. As a result, the containers fail to be started. -- All IDs in the container can be mapped to the host rootfs. Some directories or files may be mounted from the host to containers, for example, device files in the **/dev/pts** directory. If _offset_ is too small, the mounting may fail. -- _uid_, _gid_, and _offset_ are controlled by the upper-layer scheduling platform. The container engine only checks the validity of them. -- **--user-remap** is available only in system containers. -- **--user-remap** and **--privileged** cannot be set simultaneously. Otherwise, an error is reported during container startup. -- If _uid_ or _gid_ is set to **0**, **--user-remap** does not take effect. - -### Usage Guide - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Before specifying the **--user-remap** parameter, configure an offset value for UIDs and GIDs of all directories and files in rootfs. The offset value should be equal to that for _uid_ and _gid_ in **--user-remap**. ->For example, run the following command to offset UIDs and GIDs of all files in the **dev** directory with 100000: ->chown 100000:100000 dev - -Specify the **--user-remap** parameter when the system container is started. - -``` -[root@localhost ~]# isula run -tid --user-remap 100000:100000:65535 --system-container --external-rootfs /home/root-fs none /sbin/init -eb9605b3b56dfae9e0b696a729d5e1805af900af6ce24428fde63f3b0a443f4a -``` - -Check the /sbin/init process information on the host and in a container. - -``` -[root@localhost ~]# isula exec eb ps aux | grep /sbin/init -root 1 0.6 0.0 21624 9624 ? Ss 15:47 0:00 /sbin/init -[root@localhost ~]# ps aux | grep /sbin/init -100000 4861 0.5 0.0 21624 9624 ? 
Ss 15:47 0:00 /sbin/init -root 4948 0.0 0.0 213032 808 pts/0 S+ 15:48 0:00 grep --color=auto /sbin/init -``` - -The owner of the /sbin/init process in the container is user **root**, but the owner of the host is the user whose UID is **100000**. - -Create a file in a container and view the file owner on the host. - -``` -[root@localhost ~]# isula exec -it eb bash -[root@localhost /]# echo test123 >> /test123 -[root@localhost /]# exit -exit -[root@localhost ~]# ll /home/root-fs/test123 --rw-------. 1 100000 100000 8 Aug 2 15:52 /home/root-fs/test123 -``` - -The owner of the file that is generated in the container is user **root**, but the file owner displayed on the host is the user whose ID is **100000**. - -## User Permission Control - -### Function Description - -A container engine supports TLS for user identity authentication, which is used to control user permissions. Currently, container engines can connect to the authz plug-in to implement permission control. - -### API Description - -You can configure the startup parameters of the iSulad container engine to specify the permission control plug-in. The default daemon configuration file is **/etc/isulad/daemon.json**. - - - - - - - - - - - - -

Parameter

-

Example

-

Description

-

--authorization-plugin

-

"authorization-plugin": "authz-broker"

-

User permission authentication plug-in. Currently, only authz-broker is supported.

-
- -### Constraints - -- User permission policies need to be configured for authz. The default policy file is **/var/lib/authz-broker/policy.json**. This file can be dynamically modified and the modification will take effect immediately without restarting the plug-in service. -- A container engine can be started by user **root**. If some commands used are enabled for by common users, common users may obtain excessive permissions. Therefore, exercise caution when performing such operations. Currently, running the **container\_attach**, **container\_create**, and **container\_exec\_create** commands may cause risks. -- Some compound operations, such as running **isula exec** and **isula inspect** or running and **isula attach** and **isula inspect**, depend on the permission of **isula inspect**. If a user does not have this permission, an error is reported. -- Using SSL/TLS encryption channels hardens security but also reduces performance. For example, the delay increases, more CPU resources are consumed, and encryption and decryption require higher throughput. Therefore, the number of concurrent executions decreases compared with non-TLS communication. According to the test result, when the ARM server \(Cortex-A72 64-core\) is almost unloaded, TLS is used to concurrently start a container. The maximum number of concurrent executions is 200 to 250. -- If **--tlsverify** is specified on the server, the default path where authentication files store is **/etc/isulad**. The default file names are **ca.pem**, **cert.pem**, and **key.pem**. - -### Example - -1. Ensure that the authz plug-in is installed on the host. If the authz plug-in is not installed, run the following command to install and start the authz plug-in service: - - ``` - [root@localhost ~]# yum install authz - [root@localhost ~]# systemctl start authz - ``` - -2. To enable this function, configure the container engine and TLS certificate. You can use OpenSSL to generate the required certificate. 
- - ``` - #SERVERSIDE - - # Generate CA key - openssl genrsa -aes256 -passout "pass:$PASSWORD" -out "ca-key.pem" 4096 - # Generate CA - openssl req -new -x509 -days $VALIDITY -key "ca-key.pem" -sha256 -out "ca.pem" -passin "pass:$PASSWORD" -subj "/C=$COUNTRY/ST=$STATE/L=$CITY/O=$ORGANIZATION/OU=$ORGANIZATIONAL_UNIT/CN=$COMMON_NAME/emailAddress=$EMAIL" - # Generate Server key - openssl genrsa -out "server-key.pem" 4096 - - # Generate Server Certs. - openssl req -subj "/CN=$COMMON_NAME" -sha256 -new -key "server-key.pem" -out server.csr - - echo "subjectAltName = DNS:localhost,IP:127.0.0.1" > extfile.cnf - echo "extendedKeyUsage = serverAuth" >> extfile.cnf - - openssl x509 -req -days $VALIDITY -sha256 -in server.csr -passin "pass:$PASSWORD" -CA "ca.pem" -CAkey "ca-key.pem" -CAcreateserial -out "server-cert.pem" -extfile extfile.cnf - - #CLIENTSIDE - - openssl genrsa -out "key.pem" 4096 - openssl req -subj "/CN=$CLIENT_NAME" -new -key "key.pem" -out client.csr - echo "extendedKeyUsage = clientAuth" > extfile.cnf - openssl x509 -req -days $VALIDITY -sha256 -in client.csr -passin "pass:$PASSWORD" -CA "ca.pem" -CAkey "ca-key.pem" -CAcreateserial -out "cert.pem" -extfile extfile.cnf - ``` - - If you want to use the preceding content as the script, replace the variables with the configured values. If the parameter used for generating the CA is empty, set it to **"**. **PASSWORD**, **COMMON\_NAME**, **CLIENT\_NAME**, and **VALIDITY** are mandatory. - -3. When starting the container engine, add parameters related to the TLS and authentication plug-in and ensure that the authentication plug-in is running properly. In addition, to use TLS authentication, the container engine must be started in TCP listening mode instead of the Unix socket mode. 
The configuration on the container demon is as follows: - - ``` - { - "tls": true, - "tls-verify": true, - "tls-config": { - "CAFile": "/root/.iSulad/ca.pem", - "CertFile": "/root/.iSulad/server-cert.pem", - "KeyFile":"/root/.iSulad/server-key.pem" - }, - "authorization-plugin": "authz-broker" - } - ``` - -4. Configure policies. For the basic authorization process, all policies are stored in the **/var/lib/authz-broker/policy.json** configuration file. The configuration file can be dynamically modified without restarting the plug-in. Only the SIGHUP signal needs to be sent to the authz process. In the file, a line contains one JSON policy object. The following provides policy configuration examples: - - - All users can run all iSuald commands: **\{"name":"policy\_0","users":\[""\],"actions":\[""\]\}** - - Alice can run all iSulad commands: **\{"name":"policy\_1","users":\["alice"\],"actions":\[""\]\}** - - A blank user can run all iSulad commands: ** \{"name":"policy\_2","users":\[""\],"actions":\[""\]\}** - - Alice and Bob can create new containers: **\{"name":"policy\_3","users":\["alice","bob"\],"actions":\["container\_create"\]\}** - - service\_account can read logs and run **docker top**: **\{"name":"policy\_4","users":\["service\_account"\],"actions":\["container\_logs","container\_top"\]\}** - - Alice can perform any container operations: **\{"name":"policy\_5","users":\["alice"\],"actions":\["container"\]\}** - - Alice can perform any container operations, but the request type can only be **get**: **\{"name":"policy\_5","users":\["alice"\],"actions":\["container"\], "readonly":true\}** - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- **action** indicates that regular expressions are supported. - >- **users** indicates that regular expressions are not supported. - >- Users configured in **users** must be unique. That is, a user cannot match multiple rules. - -5. 
After updating the configurations, configure TLS parameters on the client to connect to the container engine. That is, access the container engine with restricted permissions. - - ``` - [root@localhost ~]# isula version --tlsverify --tlscacert=/root/.iSulad/ca.pem --tlscert=/root/.iSulad/cert.pem --tlskey=/root/.iSulad/key.pem -H=tcp://127.0.0.1:2375 - ``` - - If you want to use the TLS authentication for default client connection, move the configuration file to **\~/.iSulad** and set the **ISULAD\_HOST** and **ISULAD\_TLS\_VERIFY** variables \(rather than transferring **-H=tcp://$HOST:2375** and -**-tlsverify** during each call\). - - ``` - [root@localhost ~]# mkdir -pv ~/.iSulad - [root@localhost ~]# cp -v {ca,cert,key}.pem ~/.iSulad - [root@localhost ~]# export ISULAD_HOST=localhost:2375 ISULAD_TLS_VERIFY=1 - [root@localhost ~]# isula version - ``` - - -## proc File System Isolation \(Lxcfs\) - -### Application Scenario - -Container virtualization is lightweight and efficient, and can be quickly deployed. However, containers are not strongly isolated, which causes great inconvenience to users. Containers have some defects in isolation because the namespace feature of the Linux kernel is not perfect. For example, you can view the proc information on the host \(such as meminfo, cpuinfo, stat, and uptime\) in the proc file system of a container. You can use the lxcfs tool to replace the /proc content of instances in the container with the content in the /proc file system of the host so that services in the container can obtain the correct resource value. - -### API Description - -A system container provides two tool packages: lxcfs and lxcfs-toolkit, which are used together. Lxcfs resides on the host as the daemon process. lxcfs-toolkit mounts the lxcfs file system of the host to containers through the hook mechanism. 
- -The command line of lxcfs-toolkit is as follows: - -``` -lxcfs-toolkit [OPTIONS] COMMAND [COMMAND_OPTIONS] -``` - - - - - - - - - - - - - - - - - - - - - - - - -

Command

-

Function

-

Parameter

-

remount

-

Remounts lxcfs to containers.

-

--all: remounts lxcfs to all containers.

-

--container-id: remounts lxcfs to a specified container.

-

umount

-

Unmounts lxcfs from containers.

-

--all: unmounts lxcfs from all containers.

-

--container-id: unmounts lxcfs from a specified container.

-

check-lxcfs

-

Checks whether the lxcfs service is running properly.

-

None

-

prestart

-

Mounts the /var/lib/lxcfs directory to the container before the lxcfs service starts.

-

None

-
- -### Constraints - -- Currently, only the **cpuinfo**, **meminfo**, **stat**, **diskstats**, **partitions**, **swaps**, and **uptime** files in the proc file system are supported. Other files are not isolated from other kernel API file systems \(such as sysfs\). -- After an RPM package is installed, a sample JSON file is generated in **/var/lib/lcrd/hooks/hookspec.json**. To add the log function, you need to add the **--log** configuration during customization. -- The **diskstats** file displays only information about disks that support CFQ scheduling, instead of partition information. Devices in containers are displayed as names in the **/dev** directory. If a device name does not exist, the information is left blank. In addition, the device where the container root directory is located is displayed as **sda**. -- The **slave** parameter is required when lxcfs is mounted. If the **shared** parameter is used, the mount point in containers may be leaked to the host, affecting the host running. -- Lxcfs supports graceful service degradation. If the lxcfs service crashes or becomes unavailable, the **cpuinfo**, **meminfo**, **stat**, **diskstats**, **partitions**, **swaps **and **uptime** files in containers are about host information, and other service functions of containers are not affected. -- Bottom layer of lxcfs depends on the FUSE kernel module and libfuse library. Therefore, the kernel needs to support FUSE. -- Lxcfs supports only the running of 64-bit applications in containers. If a 32-bit application is running in a container, the CPU information \(**cpuinfo**\) read by the application may fail to meet expectations. -- Lxcfs simulates the resource view only of container control groups \(cgroups\). Therefore, system calls \(such as sysconf\) in containers can obtain only host information. Lxcfs cannot implement the kernel isolation. 
-- The CPU information \(**cpuinfo**\) displayed after lxcfs implements the isolation has the following features: - - **processor**: The value increases from 0. - - **physical id**: The value increases from 0. - - **sibliing**: It has a fixed value of **1**. - - **core id**: It has a fixed value of **0**. - - **cpu cores**: It has a fixed value of **1**. - - -### Example - -1. Install the lxcfs and lxcfs-toolkit packages and start the lxcfs service. - - ``` - [root@localhost ~]# yum install lxcfs lxcfs-toolkit - [root@localhost ~]# systemctl start lxcfs - ``` - -2. After a container is started, check whether the lxcfs mount point exists in the container. - - ``` - [root@localhost ~]# isula run -tid -v /var/lib/lxc:/var/lib/lxc --hook-spec /var/lib/isulad/hooks/hookspec.json --system-container --external-rootfs /home/root-fs none init - a8acea9fea1337d9fd8270f41c1a3de5bceb77966e03751346576716eefa9782 - [root@localhost ~]# isula exec a8 mount | grep lxcfs - lxcfs on /var/lib/lxc/lxcfs type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/cpuinfo type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/diskstats type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/meminfo type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/partitions type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/stat type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/swaps type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - lxcfs on /proc/uptime type fuse.lxcfs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other) - ``` - -3. Run the **update** command to update the CPU and memory resource configurations of the container and check the container resources. 
As shown in the following command output, the container resource view displays the actual container resource data instead of data of the host. - - ``` - [root@localhost ~]# isula update --cpuset-cpus 0-1 --memory 1G a8 - a8 - [root@localhost ~]# isula exec a8 cat /proc/cpuinfo - processor : 0 - BogoMIPS : 100.00 - cpu MHz : 2400.000 - Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid - CPU implementer : 0x41 - CPU architecture: 8 - CPU variant : 0x0 - CPU part : 0xd08 - CPU revision : 2 - - processor : 1 - BogoMIPS : 100.00 - cpu MHz : 2400.000 - Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid - CPU implementer : 0x41 - CPU architecture: 8 - CPU variant : 0x0 - CPU part : 0xd08 - CPU revision : 2 - - [root@localhost ~]# isula exec a8 free -m - total used free shared buff/cache available - Mem: 1024 17 997 7 8 1006 - Swap: 4095 0 4095 - ``` - - diff --git a/docs/en/docs/Container/security-features.md b/docs/en/docs/Container/security-features.md deleted file mode 100644 index 667f89a14a263b34ef6ce9ca001524cd907fe09b..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/security-features.md +++ /dev/null @@ -1,270 +0,0 @@ -# Security Features - -- [Security Features](#security-features) - - [Seccomp Security Configuration](#seccomp-security-configuration) - - [Scenarios](#scenarios-9) - - [Usage Restrictions](#usage-restrictions-10) - - [Usage Guide](#usage-guide-11) - - [capabilities Security Configuration](#capabilities-security-configuration) - - [Scenarios](#scenarios-12) - - [Usage Restrictions](#usage-restrictions-13) - - [Usage Guide](#usage-guide-14) - - [SELinux Security Configuration](#selinux-security-configuration) - - [Scenarios](#scenarios-15) - - [Usage Restrictions](#usage-restrictions-16) - - [Usage Guide](#usage-guide-17) - - - - - -## Seccomp Security Configuration - - - -### Scenarios - -Secure computing mode \(seccomp\) is a simple sandboxing mechanism introduced to the Linux kernel from version 2.6.23. 
In some specific scenarios, you may want to perform some privileged operations in a container without starting the privileged container. You can add **--cap-add** at runtime to obtain some small-scope permissions. For container instances with strict security requirements, the capability granularity may not meet the requirements. You can use some methods to control the permission scope in a refined manner. - -- Example - - In a common container scenario, you can use the **-v** flag to map a directory \(including a binary file that cannot be executed by common users\) on the host to the container. - - In the container, you can add chmod 4777 \(the modification permission of the binary file\) to the S flag bit. In this way, on the host, common users who cannot run the binary file \(or whose running permission is restricted\) can obtain the permissions of the binary file \(such as the root permission\) when running the binary file after the action added to the S flag bit is performed, so as to escalate the permission or access other files. - - In this scenario, if strict security requirements are required, the chmod, fchmod, and fchmodat system calls need to be tailored by using seccomp. - - -### Usage Restrictions - -- Seccomp may affect performance. Before setting seccomp, evaluate the scenario and add the configuration only if necessary. - -### Usage Guide - -Use **--security-opt** to transfer the configuration file to the container where system calls need to be filtered. - -``` -isula run -itd --security-opt seccomp=/path/to/seccomp/profile.json rnd-dockerhub.huawei.com/official/busybox -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->1. When the configuration file is transferred to the container by using **--security-opt** during container creation, the default configuration file \(**/etc/isulad/seccomp\_default.json**\) is used. ->2. 
When **--security-opt** is set to **unconfined** during container creation, system calls are not filtered for the container. ->3. **/path/to/seccomp/profile.json** must be an absolute path. - -#### Obtaining the Default Seccomp Configuration of a Common Container - -- Start a common container \(or a container with **--cap-add**\) and check its default permission configuration. - - ``` - cat /etc/isulad/seccomp_default.json | python -m json.tool > profile.json - ``` - - The **seccomp** field contains many **syscalls** fields. Then extract only the **syscalls** fields and perform the customization by referring to the customization of the seccomp configuration file. - - ``` - "defaultAction": "SCMP_ACT_ERRNO", - "syscalls": [ - { - "action": "SCMP_ACT_ALLOW", - "name": "accept" - }, - { - "action": "SCMP_ACT_ALLOW", - "name": "accept4" - }, - { - "action": "SCMP_ACT_ALLOW", - "name": "access" - }, - { - "action": "SCMP_ACT_ALLOW", - "name": "alarm" - }, - { - "action": "SCMP_ACT_ALLOW", - "name": "bind" - }, - ]... - ``` - - -- Check the seccomp configuration that can be identified by the LXC. - - ``` - cat /var/lib/isulad/engines/lcr/74353e38021c29314188e29ba8c1830a4677ffe5c4decda77a1e0853ec8197cd/seccomp - ``` - - ``` - ... - waitpid allow - write allow - writev allow - ptrace allow - personality allow [0,0,SCMP_CMP_EQ,0] - personality allow [0,8,SCMP_CMP_EQ,0] - personality allow [0,131072,SCMP_CMP_EQ,0] - personality allow [0,131080,SCMP_CMP_EQ,0] - personality allow [0,4294967295,SCMP_CMP_EQ,0] - ... - ``` - - -#### Customizing the Seccomp Configuration File - -When starting a container, use **--security-opt** to introduce the seccomp configuration file. Container instances will restrict the running of system APIs based on the configuration file. Obtain the default seccomp configuration of common containers, obtain the complete template, and customize the configuration file by referring to this section to start the container. 
- -``` -isula run --rm -it --security-opt seccomp:/path/to/seccomp/profile.json rnd-dockerhub.huawei.com/official/busybox -``` - -The configuration file template is as follows: - -``` -{ -"defaultAction": "SCMP_ACT_ALLOW", -"syscalls": [ -{ -"name": "syscall-name", -"action": "SCMP_ACT_ERRNO", -"args": null -} -] -} -``` - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->- **defaultAction** and **syscalls**: The types of their corresponding actions are the same, but their values must be different. The purpose is to ensure that each syscall has a default action. Clear definitions in the syscall array shall prevail. As long as the values of **defaultAction** and **action** are different, no action conflicts will occur. The following actions are supported: -> **SCMP\_ACT\_ERRNO**: forbids calling syscalls and displays error information. -> **SCMP\_ACT\_ALLOW**: allows calling syscalls. ->- **syscalls**: array, which can contain one or more syscalls. **args** is optional. ->- **name**: syscalls to be filtered. ->- **args**: array. The definition of each object in the array is as follows: -> ``` -> type Arg struct { -> Index uint `json:"index"` // Parameter ID. Take open(fd, buf, len) as an example. The fd corresponds to 0 and buf corresponds to 1. -> Value uint64 `json:"value"` // Value to be compared with the parameter. -> ValueTwo uint64 `json:"value_two"` // It is valid only when Op is set to MaskEqualTo. After the bitwise AND operation is performed on the user-defined value and the value of Value, the result is compared with the value of ValueTwo. If they are the same, the action is executed. 
-> Op Operator `json:"op"` -> } -> ``` -> The value of **Op** in **args** can be any of the following: -> "SCMP\_CMP\_NE": NotEqualTo -> "SCMP\_CMP\_LT": LessThan -> "SCMP\_CMP\_LE": LessThanOrEqualTo -> "SCMP\_CMP\_EQ": EqualTo -> "SCMP\_CMP\_GE": GreaterThanOrEqualTo -> "SCMP\_CMP\_GT": GreaterThan -> "SCMP\_CMP\_MASKED\_EQ": MaskEqualTo - -## capabilities Security Configuration - - - - -### Scenarios - -The capability mechanism is a security feature introduced to Linux kernel after version 2.2. The super administrator permission is controlled at a smaller granularity to prevent the root permission from being used. The root permission is divided based on different domains so that the divided permissions can be enabled or disabled separately. For details about capabilities, see the _Linux Programmer's Manual_ \([capabilities\(7\) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html)\). - -``` -man capabilities -``` - -### Usage Restrictions - -- The default capability list \(whitelist\) of the iSulad service, which is carried by common container processes by default, are as follows: - - ``` - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE" - ``` - -- Default configurations of capabilities include **CAP\_SETUID** and **CAP\_FSETID**. If the host and a container share a directory, the container can set permissions for the binary file in the shared directory. Common users on the host can use this feature to elevate privileges. The container can write **CAP\_AUDIT\_WRITE** to the host, which may cause risks. If the application scenario does not require this capability, you are advised to use **--cap-drop** to delete the capability when starting the container. -- Adding capabilities means that the container process has greater capabilities than before. 
In addition, more system call APIs are opened. - -### Usage Guide - -iSulad uses **--cap-add** or **--cap-drop** to add or delete specific permissions for a container. Do not add extra permissions to the container unless necessary. You are advised to remove the default but unnecessary permissions from the container. - -``` -isula run --rm -it --cap-add all --cap-drop SYS_ADMIN rnd-dockerhub.huawei.com/official/busybox -``` - -## SELinux Security Configuration - - - -### Scenarios - -Security-Enhanced Linux \(SELinux\) is a Linux kernel security module that provides a mechanism for supporting access control security policies. Through Multi-Category Security \(MCS\), iSulad labels processes in containers to control containers' access to resources, reducing privilege escalation risks and preventing further damage. - -### Usage Restrictions - -- Ensure that SELinux is enabled for the host and daemon \(the **selinux-enabled** field in the **/etc/isulad/daemon.json** file is set to **true** or **--selinux-enabled** is added to command line parameters\). -- Ensure that a proper SELinux policy has been configured on the host. container-selinux is recommended. -- The introduction of SELinux affects the performance. Therefore, evaluate the scenario before setting SELinux. Enable the SELinux function for the daemon and set the SELinux configuration in the container only when necessary. -- When you configure labels for a mounted volume, the source directory cannot be a subdirectory of **/**, **/usr**, **/etc**, **/tmp**, **/home**, **/run**, **/var**, **/root**, or **/usr**. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- iSulad does not support labeling the container file system. To ensure that the container file system and configuration directory are labeled with the container access permission, run the **chcon** command to label them. 
->- If SELinux access control is enabled for iSulad, you are advised to add a label to the **/var/lib/isulad** directory before starting daemon. Files and folders generated in the directory during container creation inherit the label by default. For example: -> ``` -> chcon -R system_u:object_r:container_file_t:s0 /var/lib/isulad -> ``` - -### Usage Guide - -- Enable SELinux for daemon. - - ``` - isulad --selinux-enabled - ``` - - -   - -- Configure SELinux security context labels during container startup. - - **--security-opt="label=user:USER"**: Set the label user for the container. - - **--security-opt="label=role:ROLE"**: Set the label role for the container. - - **--security-opt="label=type:TYPE"**: Set the label type for the container. - - **--security-opt="label=level:LEVEL"**: Set the label level for the container. - - **--security-opt="label=disable"**: Disable the SELinux configuration for the container. - - ``` - $ isula run -itd --security-opt label=type:container_t --security-opt label=level:s0:c1,c2 rnd-dockerhub.huawei.com/official/centos - 9be82878a67e36c826b67f5c7261c881ff926a352f92998b654bc8e1c6eec370 - ``` - - -   - -- Add the selinux label to a mounted volume \(**z** indicates the shared mode\). - - ``` - $ isula run -itd -v /test:/test:z rnd-dockerhub.huawei.com/official/centos - 9be82878a67e36c826b67f5c7261c881ff926a352f92998b654bc8e1c6eec370 - - $ls -Z /test - system_u:object_r:container_file_t:s0 file - ``` - -    - - diff --git a/docs/en/docs/Container/shared-memory-channels.md b/docs/en/docs/Container/shared-memory-channels.md deleted file mode 100644 index f00335a8fe96cb4b9e08c181566800601a15d63a..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/shared-memory-channels.md +++ /dev/null @@ -1,59 +0,0 @@ -# Shared Memory Channels - -- [Shared Memory Channels](#shared-memory-channels) - - -## Function Description - -System containers enable the communication between container and host processes through shared memory. 
You can set the **--host-channel** parameter when creating a container to allow the host to share the same tmpfs with the container so that they can communicate with each other. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--host-channel

-
  • Variable of the string type. Its format is as follows:
    <host path>:<container path>:<rw/ro>:<size limit>
    -
  • The parameter is described as follows:

    <host path>: path to which tmpfs is mounted on the host, which must be an absolute path.

    -

    <container path>: path to which tmpfs is mounted in a container, which must be an absolute path.

    -

    <rw/ro>: permissions on the file system mounted to the container. The value can only be rw (read and write) or ro (read only). The default value is rw.

    -

    <size limit>: maximum size used by the mounted tmpfs. The minimum value is one 4 KB physical page, and the maximum value is half of the total physical memory in the system. The default value is 64 MB.

    -
-
- -## Constraints - -- The lifecycle of tmpfs mounted on the host starts from the container startup to the container deletion. After a container is deleted and its occupied space is released, the space is removed. -- When a container is deleted, the path to which tmpfs is mounted on the host is deleted. Therefore, an existing directory on the host cannot be used as the mount path. -- To ensure that processes running by non-root users on the host can communicate with containers, the permission for tmpfs mounted on the host is 1777. - -## Example - -Specify the **--host-channel** parameter when creating a container. - -``` -[root@localhost ~]# isula run --rm -it --host-channel /testdir:/testdir:rw:32M --system-container --external-rootfs /root/myrootfs none init -root@3b947668eb54:/# dd if=/dev/zero of=/testdir/test.file bs=1024 count=64K -dd: error writing '/testdir/test.file': No space left on device -32769+0 records in -32768+0 records out -33554432 bytes (34 MB, 32 MiB) copied, 0.0766899 s, 438 MB/s -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- If **--host-channel** is used for size limit, the file size is constrained by the memory limit in the container. \(The OOM error may occur when the memory usage reaches the upper limit.\) ->- If a user creates a shared file on the host, the file size is not constrained by the memory limit in the container. ->- If you need to create a shared file in the container and the service is memory-intensive, you can add the value of **--host-channel** to the original value of the container memory limit, eliminating the impact. 
- diff --git a/docs/en/docs/Container/specifying-rootfs-to-create-a-container.md b/docs/en/docs/Container/specifying-rootfs-to-create-a-container.md deleted file mode 100644 index 8b0763f4d276fbf56c952c00acc251abbf1160ac..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/specifying-rootfs-to-create-a-container.md +++ /dev/null @@ -1,49 +0,0 @@ -# Specifying Rootfs to Create a Container - -- [Specifying Rootfs to Create a Container](#specifying-rootfs-to-create-a-container) - - -## Function Description - -Different from a common container that needs to be started by specifying a container image, a system container is started by specifying a local root file system \(rootfs\) through the **--external-rootfs** parameter. Rootfs contains the operating system environment on which the container depends during running. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--external-rootfs

-
  • Variable of the string type.
  • Absolute path, on the host, of the container's root file system, that is, the path of rootfs.
-
- -## Constraints - -- The rootfs directory specified by the **--external-rootfs** parameter must be an absolute path. -- The rootfs directory specified by the **--external-rootfs** parameter must be a complete OS environment. Otherwise, the container fails to be started. -- When a container is deleted, the rootfs directory specified by **--external-rootfs** is not deleted. -- Containers based on ARM rootfs cannot run on x86 servers. Containers based on x86 rootfs cannot run on ARM servers. -- You are not advised to start multiple container instances by using the same rootfs. That is, one rootfs is used only by container instances in the same lifecycle. - -## Example - -If the local rootfs path is **/root/myrootfs**, run the following command to start a system container: - -``` -# isula run -tid --system-container --external-rootfs /root/myrootfs none init -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Rootfs is a user-defined file system. Prepare it by yourself. For example, a rootfs is generated after the TAR package of container images is decompressed. - diff --git a/docs/en/docs/Container/statistics.md b/docs/en/docs/Container/statistics.md deleted file mode 100644 index bda570d8c5a4741abd61abedcfcadf7ad012cc83..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/statistics.md +++ /dev/null @@ -1,110 +0,0 @@ -# Statistics - -- [Statistics](#statistics) - - [events](#events) - - [info](#info) - - [version](#version) - - -## events - -Syntax: **docker events \[**_options_**\]** - -Function: Obtains real-time events from the docker daemon. - -Parameter description: - -**--since=""**: Displays events generated after the specified timestamp. - -**--until=""**: Displays events generated before the specified timestamp. - -Example: - -After the **docker events** command is executed, a container is created and started by running the **docker run** command. create and start events are output. 
- -``` -$ sudo docker events -2019-08-28T16:23:09.338838795+08:00 container create 53450588a20800d8231aa1dc4439a734e16955387efb5f259c47737dba9e2b5e (image=busybox:latest, name=eager_wu) -2019-08-28T16:23:09.339909205+08:00 container attach 53450588a20800d8231aa1dc4439a734e16955387efb5f259c47737dba9e2b5e (image=busybox:latest, name=eager_wu) -2019-08-28T16:23:09.397717518+08:00 network connect e2e20f52662f1ee2b01545da3b02e5ec7ff9c85adf688dce89a9eb73661dedaa (container=53450588a20800d8231aa1dc4439a734e16955387efb5f259c47737dba9e2b5e, name=bridge, type=bridge) -2019-08-28T16:23:09.922224724+08:00 container start 53450588a20800d8231aa1dc4439a734e16955387efb5f259c47737dba9e2b5e (image=busybox:latest, name=eager_wu) -2019-08-28T16:23:09.924121158+08:00 container resize 53450588a20800d8231aa1dc4439a734e16955387efb5f259c47737dba9e2b5e (height=48, image=busybox:latest, name=eager_wu, width=210) -``` - -   - -## info - -Syntax: **docker info** - -Function: Displays the Docker system information, including the number of containers, number of images, image storage driver, container execution driver, kernel version, and host OS version. - -Parameter description: none. - -Example: - -``` -$ sudo docker info -Containers: 4 - Running: 3 - Paused: 0 - Stopped: 1 -Images: 45 -Server Version: 18.09.0 -Storage Driver: devicemapper - Pool Name: docker-thinpool - Pool Blocksize: 524.3kB - Base Device Size: 10.74GB - Backing Filesystem: ext4 - Udev Sync Supported: true - Data Space Used: 11GB - Data Space Total: 51GB - Data Space Available: 39.99GB - Metadata Space Used: 5.083MB - Metadata Space Total: 532.7MB - Metadata Space Available: 527.6MB - Thin Pool Minimum Free Space: 5.1GB - Deferred Removal Enabled: true - Deferred Deletion Enabled: true - Deferred Deleted Device Count: 0 -...... -``` - -   - -## version - -Syntax: **docker version** - -Function: Displays the Docker version information, including the client version, server version, Go version, and OS and Arch information. 
- -Parameter description: none. - -Example: - -``` -$ sudo docker version -Client: - Version: 18.09.0 - EulerVersion: 18.09.0.48 - API version: 1.39 - Go version: go1.11 - Git commit: cbf6283 - Built: Mon Apr 1 00:00:00 2019 - OS/Arch: linux/arm64 - Experimental: false - -Server: - Engine: - Version: 18.09.0 - EulerVersion: 18.09.0.48 - API version: 1.39 (minimum version 1.12) - Go version: go1.11 - Git commit: cbf6283 - Built: Mon Apr 1 00:00:00 2019 - OS/Arch: linux/arm64 - Experimental: false -``` - -   - diff --git a/docs/en/docs/Container/supporting-oci-hooks.md b/docs/en/docs/Container/supporting-oci-hooks.md deleted file mode 100644 index 76ce5759967a23f6ded80ac49bfc30c429b402a8..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/supporting-oci-hooks.md +++ /dev/null @@ -1,87 +0,0 @@ -# Supporting OCI hooks - -- [Supporting OCI hooks](#supporting-oci-hooks) - - [Description](#description) - - [APIs](#apis) - - [Usage Restrictions](#usage-restrictions) - - -## Description - -The running of standard OCI hooks within the lifecycle of a container is supported. There are three types of standard hooks: - -- prestart hook: executed after the **isula start** command is executed and before the init process of the container is started. -- poststart hook: executed after the init process is started and before the **isula start** command is returned. -- poststop hook: executed after the container is stopped and before the stop command is returned. - -The configuration format specifications of OCI hooks are as follows: - -- **path**: \(Mandatory\) The value must be a character string and must be an absolute path. The specified file must have the execute permission. -- **args**: \(Optional\) The value must be a character string array. The syntax is the same as that of **args** in **execv**. -- **env**: \(Optional\) The value must be a character string array. The syntax is the same as that of environment variables. 
The content is a key-value pair, for example, **PATH=/usr/bin**. -- **timeout**: \(Optional\) The value must be an integer that is greater than 0. It indicates the timeout interval for hook execution. If the running time of the hook process exceeds the configured time, the hook process is killed. - -The hook configuration is in JSON format and usually stored in a file ended with **json**. An example is as follows: - -``` -{ - "prestart": [ - { - "path": "/usr/bin/echo", - "args": ["arg1", "arg2"], - "env": [ "key1=value1"], - "timeout": 30 - }, - { - "path": "/usr/bin/ls", - "args": ["/tmp"] - } - ], - "poststart": [ - { - "path": "/usr/bin/ls", - "args": ["/tmp"], - "timeout": 5 - } - ], - "poststop": [ - { - "path": "/tmp/cleanup.sh", - "args": ["cleanup.sh", "-f"] - } - ] -} -``` - -## APIs - -Both iSulad and iSula provide the hook APIs. The default hook configurations provided by iSulad apply to all containers. The hook APIs provided by iSula apply only to the currently created container. - -The default OCI hook configurations provided by iSulad are as follows: - -- Set the configuration item **hook-spec** in the **/etc/isulad/daemon.json** configuration file to specify the path of the hook configuration file. Example: **"hook-spec": "/etc/default/isulad/hooks/default.json"** -- Use the **isulad --hook-spec** parameter to set the path of the hook configuration file. - -The OCI hook configurations provided by iSula are as follows: - -- **isula create --hook-spec**: specifies the path of the hook configuration file in JSON format. -- **isula run --hook-spec**: specifies the path of the hook configuration file in JSON format. - -The configuration for **run** takes effect in the creation phase. - -## Usage Restrictions - -- The path specified by **hook-spec** must be an absolute path. -- The file specified by **hook-spec** must exist. -- The path specified by **hook-spec** must contain a common text file in JSON format. 
-- The file specified by **hook-spec** cannot exceed 10 MB. -- **path** configured for hooks must be an absolute path. -- The file that is designated by **path** configured for hooks must exist. -- The file that is designated by **path** configured for hooks must have the execute permission. -- The owner of the file that is designated by **path** configured for hooks must be user **root**. -- Only user **root** has the write permission on the file that is designated by **path** configured for hooks. -- The value of **timeout** configured for hooks must be greater than **0**. - -    - - diff --git a/docs/en/docs/Container/system-container.md b/docs/en/docs/Container/system-container.md deleted file mode 100644 index 626a98a8a1aeb893d09d0965fa3a19de85d98be5..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/system-container.md +++ /dev/null @@ -1,5 +0,0 @@ -# System Container - - -System containers are used for heavyweight applications and cloud-based services in scenarios with re-computing, high performance, and high concurrency. Compared with the VM technology, system containers can directly inherit physical machine features and has better performance and less overhead. In addition, system containers can be allocated more computing units of limited resources, reducing costs. Therefore, system containers can be used to build differentiated product competitiveness and provide computing unit instances with higher computing density, lower price, and better performance. - diff --git a/docs/en/docs/Container/uninstallation.md b/docs/en/docs/Container/uninstallation.md deleted file mode 100644 index 192e53f5508cc82384d3d474111029f0d67cc802..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/uninstallation.md +++ /dev/null @@ -1,24 +0,0 @@ -# Uninstallation - -To uninstall iSulad, perform the following operations: - -1. Uninstall iSulad and its dependent software packages. 
- - If the **yum** command is used to install iSulad, run the following command to uninstall iSulad: - - ``` - $ sudo yum remove iSulad - ``` - - - If the **rpm** command is used to install iSulad, uninstall iSulad and its dependent software packages. Run the following command to uninstall an RPM package. - - ``` - sudo rpm -e iSulad-xx.xx.xx-YYYYmmdd.HHMMSS.gitxxxxxxxx.aarch64.rpm - ``` - -2. Images, containers, volumes, and related configuration files are not automatically deleted. The reference command is as follows: - - ``` - $ sudo rm -rf /var/lib/iSulad - ``` - - diff --git a/docs/en/docs/Container/upgrade-methods.md b/docs/en/docs/Container/upgrade-methods.md deleted file mode 100644 index 5294263ed82402538f59fb9cfe43f950e9b367e8..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/upgrade-methods.md +++ /dev/null @@ -1,21 +0,0 @@ -# Upgrade Methods - -- For an upgrade between patch versions of a major version, for example, upgrading 2.x.x to 2.x.x, run the following command: - - ``` - $ sudo yum update -y iSulad - ``` - -- For an upgrade between major versions, for example, upgrading 1.x.x to 2.x.x, save the current configuration file **/etc/isulad/daemon.json**, uninstall the existing iSulad software package, install the iSulad software package to be upgraded, and restore the configuration file. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- You can run the **sudo rpm -qa |grep iSulad** or **isula version** command to check the iSulad version. 
->- If you want to manually perform upgrade between patch versions of a major version, run the following command to download the RPM packages of iSulad and all its dependent libraries: -> ``` -> $ sudo rpm -Uhv iSulad-xx.xx.xx-YYYYmmdd.HHMMSS.gitxxxxxxxx.aarch64.rpm -> ``` -> If the upgrade fails, run the following command to forcibly perform the upgrade: -> ``` -> $ sudo rpm -Uhv --force iSulad-xx.xx.xx-YYYYmmdd.HHMMSS.gitxxxxxxxx.aarch64.rpm -> ``` - diff --git a/docs/en/docs/Container/usage-guide.md b/docs/en/docs/Container/usage-guide.md deleted file mode 100644 index df5b305b523c1d4c60c33fd4d270dc58d613be0a..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/usage-guide.md +++ /dev/null @@ -1,19 +0,0 @@ -# Usage Guide - - -System container functions are enhanced based on the iSula container engine. The container management function and the command format of the function provided by system containers are the same as those provided by the iSula container engine. - -The following sections describe how to use the enhanced functions provided by system containers. For details about other command operations, see [iSulad Container Engine](#isulad-container-engine.md#EN-US_TOPIC_0184808037). - -The system container functions involve only the **isula create/run** command. Unless otherwise specified, this command is used for all functions. The command format is as follows: - -``` -isula create/run [OPTIONS] [COMMAND] [ARG...] -``` - -In the preceding format: - -- **OPTIONS**: one or more command parameters. For details about supported parameters, see [iSulad Container Engine](#isulad-container-engine.md#EN-US_TOPIC_0184808037) \> [Appendix](#appendix.md#EN-US_TOPIC_0184808158) \> [Command Line Parameters](#command-line-parameters.md#EN-US_TOPIC_0189976936). -- **COMMAND**: command executed after a system container is started. -- **ARG**: parameter corresponding to the command executed after a system container is started. 
- diff --git a/docs/en/docs/Container/using-systemd-to-start-a-container.md b/docs/en/docs/Container/using-systemd-to-start-a-container.md deleted file mode 100644 index 4b251231748f30b94973cdd9a557994350a7db69..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/using-systemd-to-start-a-container.md +++ /dev/null @@ -1,91 +0,0 @@ -# Using systemd to Start a Container - -- [Using systemd to Start a Container](#using-systemd-to-start-a-container) - - -## Function Description - -The init process started in system containers differs from that in common containers. Common containers cannot start system services through systemd. However, system containers have this capability. You can enable the systemd service by specifying the **--system-container** parameter when starting a system container. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--system-container

-
  • The value is of a Boolean data type and can be true or false. The default value is true.
  • Specifies whether it is a system container. This function must be enabled.
-
- -## Constraints - -- The systemd service needs to call some special system APIs, including mount, umount2, unshare, reboot, and name\_to\_handle\_at. Therefore, permissions to call the preceding APIs are enabled for system containers when the privileged container tag is disabled. -- All system containers are started by the init process. The init process does not respond to the SIGTERM signal which indicates normal exit. By default, the **stop** command forcibly kills the container 10 seconds later. If you need a quicker stop, you can manually specify the timeout duration of the **stop** command. -- **--system-container** must be used together with **--external-rootfs**. -- Various services can run in a system container. The **systemctl** command is used to manage the service starting and stopping. Services may depend on each other. As a result, when an exception occurs, some service processes are in the D or Z state so that the container cannot exit properly. -- Some service processes in a system container may affect other operation results. For example, if the NetworkManager service is running in the container, adding NICs to the container may be affected \(the NICs are successfully added but then stopped by the NetworkManger\), resulting in unexpected results. -- Currently, system containers and hosts cannot be isolated by using udev events. Therefore, the **fstab** file cannot be configured. -- The systemd service may conflict with the cgconfig service provided by libcgroup. You are advised to delete the libcgroup-related packages from a container or set **Delegate** of the cgconfig service to **no**. - -## Example - -- Specify the **--system-container** and **--external-rootfs** parameters to start a system container. - - ``` - [root@localhost ~]# isula run -tid -n systest01 --system-container --external-rootfs /root/myrootfs none init - ``` - -- After the preceding commands are executed, the container is running properly. 
You can run the **exec** command to access the container and view the process information. The command output indicates that the systemd service has been started. - - ``` - [root@localhost ~]# isula exec -it systest01 bash - [root@localhost /]# ps -ef - UID PID PPID C STIME TTY TIME CMD - root 1 0 2 06:49 ? 00:00:00 init - root 14 1 2 06:49 ? 00:00:00 /usr/lib/systemd/systemd-journal - root 16 1 0 06:49 ? 00:00:00 /usr/lib/systemd/systemd-network - dbus 23 1 0 06:49 ? 00:00:00 /usr/bin/dbus-daemon --system -- - root 25 0 0 06:49 ? 00:00:00 bash - root 59 25 0 06:49 ? 00:00:00 ps -ef - ``` - - -- Run the **systemctl** command in the container to check the service status. The command output indicates that the service is managed by systemd. - - ``` - [root@localhost /]# systemctl status dbus - ● dbus.service - D-Bus System Message Bus - Loaded: loaded (/usr/lib/systemd/system/dbus.service; static; vendor preset: - disabled) - Active: active (running) since Mon 2019-07-22 06:49:38 UTC; 2min 5 - 8s ago - Docs: man:dbus-daemon(1) - Main PID: 23 (dbus-daemon) - CGroup: /system.slice/dbus.service - └─23 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidf - ile --systemd-activation --syslog-only - - Jul 22 06:49:38 localhost systemd[1]: Started D-Bus System Message Bus. - ``` - -- Run the **systemctl** command in the container to stop or start the service. The command output indicates that the service is managed by systemd. 
- - ``` - [root@localhost /]# systemctl stop dbus - Warning: Stopping dbus.service, but it can still be activated by: - dbus.socket - [root@localhost /]# systemctl start dbus - ``` - - diff --git a/docs/en/docs/Container/writable-namespace-kernel-parameters.md b/docs/en/docs/Container/writable-namespace-kernel-parameters.md deleted file mode 100644 index d10936907df1dba150fcdbe3482edd05574fa5d6..0000000000000000000000000000000000000000 --- a/docs/en/docs/Container/writable-namespace-kernel-parameters.md +++ /dev/null @@ -1,93 +0,0 @@ -# Writable Namespace Kernel Parameters - -- [Writable Namespace Kernel Parameters](#writable-namespace-kernel-parameters) - - -## Function Description - -For services running in containers, such as databases, big data, and common applications, some kernel parameters need to be set and adjusted to obtain the optimal performance and reliability. The modification permission of all kernel parameters must be disabled or enabled simultaneously \(by using privileged container\). - -When the modification permission is disabled, only the --sysctl external interface is provided and parameters cannot be flexibly modified in a container. - -When the modification permission is enabled, some kernel parameters are globally valid. If some parameters are modified in a container, all programs on the host will be affected, harming security. - -   - -System containers provide the **--ns-change-opt** parameter, which can be used to dynamically set namespace kernel parameters in a container. The parameter value can be **net** or **ipc**. - -## Parameter Description - - - - - - - - - - - - -

Command

-

Parameter

-

Value Description

-

isula create/run

-

--ns-change-opt

-
  • Variable of the string type.
  • The parameter value can be net or ipc.

    net: All namespace parameters in the /proc/sys/net directory are supported.

    -

    ipc: Supported namespace parameters are as follows:

    -

    /proc/sys/kernel/msgmax

    -

    /proc/sys/kernel/msgmnb

    -

    /proc/sys/kernel/msgmni

    -

    /proc/sys/kernel/sem

    -

    /proc/sys/kernel/shmall

    -

    /proc/sys/kernel/shmmax

    -

    /proc/sys/kernel/shmmni

    -

    /proc/sys/kernel/shm_rmid_forced

    -

    /proc/sys/fs/mqueue/msg_default

    -

    /proc/sys/fs/mqueue/msg_max

    -

    /proc/sys/fs/mqueue/msgsize_default

    -

    /proc/sys/fs/mqueue/msgsize_max

    -

    /proc/sys/fs/mqueue/queues_max

    -
  • You can specify multiple namespace configurations and separate them with commas (,). For example, --ns-change-opt=net,ipc.
-
- -## Constraints - -- If both **--privileged** \(privileged container\) and **--ns-change-opt** are specified during container startup, **--ns-change-opt** does not take effect. - -## Example - -Start a container and set **--ns-change-opt** to **net**. - -``` -[root@localhost ~]# isula run -tid --ns-change-opt net --system-container --external-rootfs /root/myrootfs none init -4bf44a42b4a14fdaf127616c90defa64b4b532b18efd15b62a71cbf99ebc12d2 -[root@localhost ~]# isula exec -it 4b mount | grep /proc/sys -proc on /proc/sys type proc (ro,nosuid,nodev,noexec,relatime) -proc on /proc/sysrq-trigger type proc (ro,nosuid,nodev,noexec,relatime) -proc on /proc/sys/net type proc (rw,nosuid,nodev,noexec,relatime) -``` - -The mount point **/proc/sys/net** in the container has the **rw** option, indicating that the **net**-related namespace kernel parameters have the read and write permissions. - -Start another container and set **--ns-change-opt** to **ipc**. - -``` -[root@localhost ~]# isula run -tid --ns-change-opt ipc --system-container --external-rootfs /root/myrootfs none init -c62e5e5686d390500dab2fa76b6c44f5f8da383a4cbbeac12cfada1b07d6c47f -[root@localhost ~]# isula exec -it c6 mount | grep /proc/sys -proc on /proc/sys type proc (ro,nosuid,nodev,noexec,relatime) -proc on /proc/sysrq-trigger type proc (ro,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/shmmax type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/shmmni type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/shmall type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/shm_rmid_forced type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/msgmax type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/msgmni type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/msgmnb type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/kernel/sem type proc (rw,nosuid,nodev,noexec,relatime) -proc on /proc/sys/fs/mqueue 
type proc (rw,nosuid,nodev,noexec,relatime) -``` - -The mount point information of **ipc**-related kernel parameters in the container contains the **rw** option, indicating that the **ipc**-related namespace kernel parameters have the read and write permissions. - diff --git a/docs/en/docs/Installation/FAQ1.md b/docs/en/docs/Installation/FAQ1.md deleted file mode 100644 index 9f065f6c2b1f9e7d18c90b3f8f76f04fa50a2ee4..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/FAQ1.md +++ /dev/null @@ -1,26 +0,0 @@ -# FAQ - - -- [FAQ](#faq) - - [Failing to Start the Raspberry Pi](#树莓派启动失败) - - [Symptom](#问题现象) - - [Cause Analysis](#原因分析) - - [Solution](#解决方法) - - -## Failing to Start the Raspberry Pi - -### Symptom - -After the Raspberry Pi image released by the openEuler is written to the SD card, the Raspberry Pi fails to be started. - -### Cause Analysis - -The possible causes are as follows: - -1. The downloaded image file is incomplete. To avoid this problem, ensure that the image passes the integrity verification. -2. An error occurs when the image is written to the SD card. In most cases, the error occurs when the image is written to the SD card in the Windows environment using the application software. - -### Solution - -Re-write the complete image to the SD card. \ No newline at end of file diff --git a/docs/en/docs/Installation/Installation-Guide1.md b/docs/en/docs/Installation/Installation-Guide1.md deleted file mode 100644 index 763dcf35f37ef5ffb54423573dbc20037130f21c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/Installation-Guide1.md +++ /dev/null @@ -1,188 +0,0 @@ -# Installation Guide - -This section describes how to enable the Raspberry Pi function after [Writing Raspberry Pi Images into the SD card](./安装方式介绍-1.html). 
- - -- [Installation Guide](#安装指导) - - [Starting the System](#启动系统) - - [Logging in to the System](#启动系统) - - [Configuring the System](#启动系统) - - [Expanding the Root Directory Partition](#扩展根目录分区) - - [Connecting to the Wi-Fi Network](#连接-wifi) - - -## Starting the System - -After an image is written into the SD card, insert the SD card into the Raspberry Pi and power on the SD card. - -For details about the Raspberry Pi hardware, visit the [Raspberry Pi official website](https://www.raspberrypi.org/). - -## Logging in to the System - -You can log in to the Raspberry Pi in either of the following ways: - -1. Local login - - Connect the Raspberry Pi to the monitor (the Raspberry Pi video output interface is Micro HDMI), keyboard, and mouse, and start the Raspberry Pi. The Raspberry Pi startup log is displayed on the monitor. After Raspberry Pi is started, enter the user name **root** and password **openeuler** to log in. - -2. SSH remote login - - By default, the Raspberry Pi uses the DHCP mode to automatically obtain the IP address. If the Raspberry Pi is connected to a known router, you can log in to the router to check the IP address. The new IP address is the Raspberry Pi IP address. - - **Figure 1** Obtain the IP address - ![](./figures/Obtain the IP address) - - According to the preceding figure, the IP address of the Raspberry Pi is **192.168.31.109**. You can run the `ssh root@192.168.31.109` command and enter the password `openeuler` to remotely log in to the Raspberry Pi. - -## Configuring the System - -### Expanding the Root Directory Partition - -The space of the default root directory partition is small. Therefore, you need to expand the partition capacity before using it. - -To expand the root directory partition capacity, perform the following procedure: - -1. Run the `fdisk -l` command as the root user to check the drive partition information. 
The command output is as follows: - - ``` - # fdisk -l - Disk /dev/mmcblk0: 14.86 GiB, 15931539456 bytes, 31116288 sectors - Units: sectors of 1 * 512 = 512 bytes - Sector size (logical/physical): 512 bytes / 512 bytes - I/O size (minimum/optimal): 512 bytes / 512 bytes - Disklabel type: dos - Disk identifier: 0xf2dc3842 - - Device Boot Start End Sectors Size Id Type - /dev/mmcblk0p1 * 8192 593919 585728 286M c W95 FAT32 (LBA) - /dev/mmcblk0p2 593920 1593343 999424 488M 82 Linux swap / Solaris - /dev/mmcblk0p3 1593344 5044223 3450880 1.7G 83 Linux - ``` - - The drive letter of the SD card is **/dev/mmcblk0**, which contains three partitions: - - - **/dev/mmcblk0p1**: boot partition - - **/dev/mmcblk0p2**: swap partition - - **/dev/mmcblk0p3**: root directory partition - - Here, we need to expand the capacity of `/dev/mmcblk0p3`. - -2. Run the `fdisk /dev/mmcblk0` command as the root user and the interactive command line interface (CLI) is displayed. To expand the partition capacity, perform the following procedure as shown in [Figure 2](#zh-cn_topic_0151920806_f6ff7658b349942ea87f4521c0256c315). - - 1. Enter `p` to check the partition information. - - Record the start sector number of `/dev/mmcblk0p3`. That is, the value in the `Start` column of the `/dev/mmcblk0p3` information. In the example, the start sector number is `1593344`. - - 2. Enter `d` to delete the partition. - - 3. Enter `3` or press `Enter` to delete the partition whose number is `3`. That is, the `/dev/mmcblk0p3`. - - 4. Enter `n` to create a partition. - - 5. Enter `p` or press `Enter` to create a partition of the `Primary` type. - - 6. Enter `3` or press `Enter` to create a partition whose number is `3`. That is, the `/dev/mmcblk0p3`. - - 7. Enter the start sector number of the new partition. That is, the start sector number recorded in Step `1`. In the example, the start sector number is `1593344`. 
- - > ![](./public_sys-resources/icon-notice.gif) **NOTE:** -Do not press **Enter** or use the default parameters. - - 8. Press `Enter` to use the last sector number by default as the end sector number of the new partition. - - 9. Enter `N` without changing the sector ID. - - 10. Enter `w` to save the partition settings and exit the interactive CLI. - - **Figure 2** Expand the partition capacity -![](./figures/Expand the partition capacity) - -3. Run the `fdisk -l` command as the root user to check the drive partition information and ensure that the drive partition is correct. The command output is as follows: - - ``` - # fdisk -l - Disk /dev/mmcblk0: 14.86 GiB, 15931539456 bytes, 31116288 sectors - Units: sectors of 1 * 512 = 512 bytes - Sector size (logical/physical): 512 bytes / 512 bytes - I/O size (minimum/optimal): 512 bytes / 512 bytes - Disklabel type: dos - Disk identifier: 0xf2dc3842 - - Device Boot Start End Sectors Size Id Type - /dev/mmcblk0p1 * 8192 593919 585728 286M c W95 FAT32 (LBA) - /dev/mmcblk0p2 593920 1593343 999424 488M 82 Linux swap / Solaris - /dev/mmcblk0p3 1593344 31116287 29522944 14.1G 83 Linux - ``` - -4. Run the `resize2fs /dev/mmcblk0p3` command as the root user to increase the size of the unloaded file system. - -5. Run the `df -lh` command to check the drive space information and ensure that the root directory partition has been expanded. - - > ![](./public_sys-resources/icon-notice.gif) **NOTE:** -If the root directory partition is not expanded, run the `reboot` command to restart the Raspberry Pi and then run the `resize2fs /dev/mmcblk0p3` command as the root user. - -### Connecting to the Wi-Fi Network - -To connect to the Wi-Fi network, perform the following procedure: - -1. Check the IP address and network adapter information. 
- - `ip a` - - Obtain information about the wireless network adapter **wlan0**: - - ``` - 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - valid_lft forever preferred_lft forever - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 - link/ether dc:a6:32:50:de:57 brd ff:ff:ff:ff:ff:ff - inet 192.168.31.109/24 brd 192.168.31.255 scope global dynamic noprefixroute eth0 - valid_lft 41570sec preferred_lft 41570sec - inet6 fe80::cd39:a969:e647:3043/64 scope link noprefixroute - valid_lft forever preferred_lft forever - 3: wlan0: mtu 1500 qdisc fq_codel state DOWN group default qlen 1000 - link/ether e2:e6:99:89:47:0c brd ff:ff:ff:ff:ff:ff - ``` - -2. Scan information about available Wi-Fi networks. - - `nmcli dev wifi` - -3. Connect to the Wi-Fi network. - - Run the `nmcli dev wifi connect SSID password PWD` command as the root user to connect to the Wi-Fi network. - - In the command, `SSID` indicates the SSID of the available Wi-Fi network scanned in the preceding step, and `PWD` indicates the password of the Wi-Fi network. For example, if the `SSID` is `openEuler-wifi`and the password is `12345678`, the command for connecting to the Wi-Fi network is `nmcli dev wifi connect openEuler-wifi password 12345678`. The connection is successful. - - ``` - Device 'wlan0' successfully activated with '26becaab-4adc-4c8e-9bf0-1d63cf5fa3f1'. - ``` - -4. Check the IP address and wireless network adapter information. 
- - `ip a` - - ``` - 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - valid_lft forever preferred_lft forever - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 - link/ether dc:a6:32:50:de:57 brd ff:ff:ff:ff:ff:ff - inet 192.168.31.109/24 brd 192.168.31.255 scope global dynamic noprefixroute eth0 - valid_lft 41386sec preferred_lft 41386sec - inet6 fe80::cd39:a969:e647:3043/64 scope link noprefixroute - valid_lft forever preferred_lft forever - 3: wlan0: mtu 1500 qdisc fq_codel state UP group default qlen 1000 - link/ether dc:a6:32:50:de:58 brd ff:ff:ff:ff:ff:ff - inet 192.168.31.110/24 brd 192.168.31.255 scope global dynamic noprefixroute wlan0 - valid_lft 43094sec preferred_lft 43094sec - inet6 fe80::394:d086:27fa:deba/64 scope link noprefixroute - valid_lft forever preferred_lft forever - ``` \ No newline at end of file diff --git a/docs/en/docs/Installation/Installation-Modes1.md b/docs/en/docs/Installation/Installation-Modes1.md deleted file mode 100644 index 30b15df258893f11d90d7249036a1f8d31e0d6fe..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/Installation-Modes1.md +++ /dev/null @@ -1,121 +0,0 @@ -# Installation Modes - -> ![](./public_sys-resources/icon-notice.gif) **NOTE:** -> -> - The hardware supports only Raspberry Pi 3B/3B+/4B. -> - The installation is performed by writing images to the SD card. This section describes how to write images using Windows, Linux, and Mac. -> - The image used in this section is the Raspberry Pi image of openEuler. For details about how to obtain the image, see [Installation Preparations](./安装准备-1.html). 
- - - -- [Installation Modes](./Installation Modes) - - [Writing Images Using Windows](./Writing Images Using Windows) - - [Formatting the SD Card](./Formatting the SD Card) - - [Writing Images to the SD Card](./Writing Images to the SD Card) - - [Writing Images Using Linux](./Writing Images Using Linux) - - [Checking Drive Partition Information](./Checking Drive Partition Information) - - [Unmouting the SD Card](./Unmouting the SD Card) - - [Writing Images to the SD Card](./Writing Images to the SD Card) - - [Writing Images Using the Mac OS](./Writing Images Using the Mac OS) - - [Checking Drive Partition Information](./Checking Drive Partition Information) - - [Unmouting the SD Card](./Unmouting the SD Card) - - [Writing Images to the SD Card](./Writing Images to the SD Card) - - -## Writing Images Using Windows - -This section uses Windows 10 as an example to describe how to write images to the SD card using Windows. - -### Formatting the SD Card - -To format the SD card, perform the following procedure: - -1. Download and install the tool for SD card formatting. The following operations use the SD Card Formatter as an example. - -2. Start the SD Card Formatter. In **Select card**, select the drive letter of the SD card to be formatted. - - If no image has been installed in the SD card, only one drive letter exists. In **Select card**, select the drive letter of the SD card to be formatted. - - If an image has been installed in the SD card, one or more drive letters exist. In **Select card**, select the drive letter E of the boot partition corresponding to the SD card to be formatted. The SD card corresponds to three drive letters: E, G, and H, as shown in the following figure: - - Figure 1 Drive letter -![](./figures/Drive letter) - -3. In **Formatting options**, select a formatting mode. The default mode is **Quick format**. - -4. Click **Format** to start formatting. A progress bar is displayed to show the formatting progress. - -5. 
After the formatting is completed, a dialog box is displayed, indicating that formatting is successfully completed. Click **OK**. - -### Writing Images to the SD Card - -> ![](./public_sys-resources/icon-notice.gif) **NOTE:** -If the compressed image file **openEuler-21.03-raspi-aarch64.img.xz** is obtained, decompress the file to obtain the **openEuler-21.03-raspi-aarch64.img** image file. - -To write the **openEuler-21.03-raspi-aarch64.img** image file to the SD card, perform the following procedure: - -1. Download and install the tool for writing images. The following operations use the Win32 Disk Imager as an example. -2. Start the Win32 Disk Imager and right-click **Run as administrator**. -3. Select the path of the image file in IMG format from the **Image File** drop-down list box. -4. In **Device**, select the drive letter of the SD card to which data is written. -5. Click **Write**. A progress bar is displayed to show the progress of writing data to the SD card. -6. After the write operation is completed, a dialog box is displayed, indicating that the write operation is successfully completed. Click **OK**. - -## Writing Images Using Linux - -This section describes how to write images to the SD card in the Linux environment. - -### Checking Drive Partition Information - -Run the `fdisk -l` command as the root user to obtain the information of the SD card and drive partitions. For example, the drive partition corresponding to the SD card can be /dev/sdb. - -### Unmouting the SD Card - -1. Run the `df -lh` command to check the mounted volumes. - -2. If the partitions corresponding to the SD card are not mounted, skip this step. If the partitions corresponding to the SD card are mounted, for example, /dev/sdb1 and /dev/sdb3, run the following commands as the root user to unmount the partitions: - - `umount /dev/sdb1` - - `umount /dev/sdb3` - -### Writing Images to the SD Card - -1. 
If the image obtained is compressed, run the `xz -d openEuler-21.03-raspi-aarch64.img.xz` command to decompress the compressed file to obtain the **openEuler-21.03-raspi-aarch64.img** image file. Otherwise, skip this step. - -2. Run the following command as the root user to write the `openEuler-21.03-raspi-aarch64.img` image to the SD card: - - `dd bs=4M if=openEuler-21.03-raspi-aarch64.img of=/dev/sdb` - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** Generally, the block size is set to 4 MB. If the write operation fails or the written image cannot be used, you can set the block size to 1 MB and try again. However, the write operation is time-consuming when the block size is set to 1 MB. - -## Writing Images Using the Mac OS - -This section describes how to flash images to the SD card in the Mac environment. - -### Checking Drive Partition Information - -Run the `diskutil list` command as user root to obtain the information of SD cards and drive partitions. For example, the drive partition corresponding to the SD card can be /dev/disk3. - -### Unmouting the SD Card - -1. Run the `df -lh` command to check the mounted volumes. - -2. If the partitions corresponding to the SD card are not mounted, skip this step. If the partitions corresponding to the SD card are mounted, for example, dev/disk3s1 and /dev/disk3s3, run the following commands as the root user to unmount the partitions: - - `diskutil umount /dev/disk3s1` - - `diskutil umount /dev/disk3s3` - -### Writing Images to the SD Card - -1. If the image obtained is compressed, run the `xz -d openEuler-21.03-raspi-aarch64.img.xz` command to decompress the compressed file to obtain the **openEuler-21.03-raspi-aarch64.img** image file. Otherwise, skip this step. - -2. 
Run the following command as the root user to write the image `openEuler-21.03-raspi-aarch64.img` to the SD card: - - `dd bs=4m if=openEuler-21.03-raspi-aarch64.img of=/dev/sdb` - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > Generally, the block size is set to 4 MB. If the write operation fails or the written image cannot be used, you can set the block size to 1 MB and try again. However, the write operation is time-consuming when the block size is set to 1 MB. - diff --git a/docs/en/docs/Installation/Installation-Preparations1.md b/docs/en/docs/Installation/Installation-Preparations1.md deleted file mode 100644 index f3fb9d318d1620a2cdb2b1c24e9d338b70891bb4..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/Installation-Preparations1.md +++ /dev/null @@ -1,125 +0,0 @@ -# Installation Preparations - -This section describes the compatibility of the hardware and software and the related configurations and preparations required for the installation. - - -- [Installation Preparations](#安装准备) - - [Obtaining the Installation Source](#获取安装源) - - [Verifying the Image Integrity](#镜像完整性校验) - - [Overview](#简介) - - [Prerequisites](#前提条件) - - [Procedure](#操作指导) - - [Installation Requirements](#安装要求) - - [Hardware Compatibility](#硬件兼容支持) - - [Minimum Hardware Specifications](#最小硬件要求) - - -## Obtaining the Installation Source - -Before installation, obtain the openEuler Raspberry Pi image and its verification file. - -1. Log in to the [openEuler Community](https://openeuler.org) website. - -2. Click **Download**. - -3. Click the **Link** provided after **Download ISO**. The download list is displayed. - -4. Click **openEuler-21.03**. The openEuler 21.03 version download list is displayed. - -5. Click **Raspberry Pi**. The Raspberry Pi image download list is displayed. - - - **aarch64**: image of the AArch64 architecture - -6. Click **aarch64** to download the Raspberry Pi AArch64 image download list. - -7. 
Click **openEuler-21.03-raspi-aarch64.img.xz** to download the openEuler Raspberry Pi image to the local PC. - -8. Click **openEuler-21.03-raspi-aarch64.img.xz.sha256sum** to download the verification file of the openEuler Raspberry Pi image to the local PC. - -## Verifying the Image Integrity - -### Overview - -During package transmission, to prevent software packages from being incompletely downloaded due to network or storage device problems, you need to verify the integrity of the software packages after obtaining them. Only the software packages that pass the verification can be deployed. - -Compare the verification value recorded in the verification file with the verification value that is manually calculated to determine whether the software package is complete. If the two values are the same, the downloaded file is complete. Otherwise, the downloaded file is incomplete and you need to obtain the software package again. - -### Prerequisites - -Before verifying the integrity of the image file, ensure that the following files are available: - -Image file: **openEuler-21.03-raspi-aarch64.img.xz** - -Verification file: **openEuler-21.03-raspi-aarch64.img.xz.sha256sum** - -### Procedure - -To verify the file integrity, perform the following procedure: - -1. Obtain the verification value from the verification file. Run the following command: - - ``` - $ cat openEuler-21.03-raspi-aarch64.img.xz.sha256sum - ``` - -2. Calculate the SHA256 verification value of the file. Run the following command: - - ``` - $ sha256sum openEuler-21.03-raspi-aarch64.img.xz - ``` - - After the command is executed, the verification value is displayed. - -3. Check whether the verification value calculated in step 1 is consistent with that calculated in step 2. - - If the verification values are consistent, the downloaded file is not damaged. Otherwise, the downloaded file is incomplete and you need to obtain the file again. 
- -## Installation Requirements - -If the openEuler operating system is installed in the Raspberry Pi environment, the Raspberry Pi environment must meet the hardware compatibility and minimum hardware specifications as follows. - -### Hardware Compatibility - -Currently, the openEuler Raspberry Pi image supports the 3B, 3B+, and 4B versions. - -### Minimum Hardware Specifications - -[Table 1](#tff48b99c9bf24b84bb602c53229e2542) lists the minimum hardware specifications for the openEuler Raspberry Pi image. - -**Table 1** Minimum hardware specifications - - - - - - - - - - - - - - - - - - - - - - -

Component Name

-

Minimum Hardware Specifications

-

Description

-

Raspberry Pi version

-
  • Raspberry Pi 3B
  • Raspberry Pi 3B+
  • Raspberry Pi 4B
-

-

-

Memory

-

≥ 2 GB (4 GB or higher recommended for better user experience)

-

-

-

Drive

-

8 GB or higher recommended for better user experience

-

-

-
- diff --git a/docs/en/docs/Installation/Installation.md b/docs/en/docs/Installation/Installation.md deleted file mode 100644 index 6ce6f6b20da525a6077f0bef45ef3dc0e9700266..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/Installation.md +++ /dev/null @@ -1,6 +0,0 @@ -# Installation Guide - -This guide describes how to install Huawei openEuler. - -This guide is intended for openEuler users with a basic understanding of Linux system management, and is also recommended for administrators, system engineers, and maintenance personnel. - diff --git a/docs/en/docs/Installation/More-Resources.md b/docs/en/docs/Installation/More-Resources.md deleted file mode 100644 index b8dd200267a33c4ec6d56d7c473095202df6bd68..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/More-Resources.md +++ /dev/null @@ -1,4 +0,0 @@ -# Reference - -- [How to Create a Raspberry Pi Image File](https://gitee.com/openeuler/raspberrypi/blob/master/documents/openEuler镜像的构建.md) -- [How to Use Raspberry Pi](https://gitee.com/openeuler/raspberrypi/blob/master/documents/树莓派使用.md) \ No newline at end of file diff --git a/docs/en/docs/Installation/faqs.md b/docs/en/docs/Installation/faqs.md deleted file mode 100644 index 02b52194e221812eea69a1a7d9d99c37c18ed271..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/faqs.md +++ /dev/null @@ -1,308 +0,0 @@ -# FAQs - -[[toc]] - -## Why Does openEuler Fail to Start After I Install It to the Second Disk? - -### Symptom - -The OS is installed on the second disk **sdb** during the installation. The openEuler fails to be started. - -### Possible Cause - -When openEuler is installed to the second disk, MBR and GRUB are installed to the second disk **sdb** by default. The following two situations may occur: - -1. openEuler installed on the first disk is loaded and started if it is complete. -2. openEuler installed on the first disk fails to be started from hard disks if it is incomplete. 
- -The preceding two situations occur because the first disk **sda** is booted by default to start openEuler on the BIOS window. If openEuler is not installed on the **sda** disk, system restart fails. - -### Solution - -This problem can be solved using either of the following two methods: - -- During the installation of openEuler, select the first disk or both disks, and install the boot loader on the first disk **sda**. -- After installing openEuler, restart it by modifying the boot option on the BIOS window. - - -## Why Does openEuler Enter Emergency Mode After It Is Powered On? - -### Symptom - -openEuler enters emergency mode after it is powered on. - -![](./figures/en-us_image_0229291264.jpg) - -### Possible Causes - -Damaged OS files result in disk mounting failure, or overpressured I/O results in disk mounting timeout \(threshold: 90s\). - -An unexpected system power-off, and low I/O performance of disks may also cause the problem. - -### Solution - -1. Enter the password of the **root** account to log in to openEuler. -2. Check and restore files by using the file system check \(fsck\) tool, and restart openEuler. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The fsck tool checks and maintains inconsistent file systems. If the system is powered off or a disk is faulty, run the **fsck** command to check file systems. Run the **fsck.ext3 -h** and **fsck.ext4 -h** commands to view the usage method of the fsck tool. - - -If you want to disable the timeout mechanism of disk mounting, add **x-systemd.device-timeout=0** to the **etc/fstab** file. 
For example: - -``` -# -# /etc/fstab -# Created by anaconda on Mon Sep 14 17:25:48 2015 -# -# Accessible filesystems, by reference, are maintained under '/dev/disk' -# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info -# -/dev/mapper/openEuler-root / ext4 defaults,x-systemd.device-timeout=0 0 0 -UUID=afcc811f-4b20-42fc-9d31-7307a8cfe0df /boot ext4 defaults,x-systemd.device-timeout=0 0 0 -/dev/mapper/openEuler-home /home ext4 defaults 0 0 -/dev/mapper/openEuler-swap swap swap defaults 0 0 -``` - -## Failed to Reinstall openEuler When a Logical Volume Group That Cannot Be Activated Has Existed in openEuler - -### Symptom - -After a disk fails, openEuler fails to be reinstalled because a logical volume group that cannot be activated has existed in openEuler. - -### Possible Cause - -During the installation of openEuler, a logical volume group cannot be activated. - -### Solution - -Before reinstalling openEuler, restore the abnormal logical volume group to the normal status or clear it. The following uses an example: - -- Restore the logical volume group. - 1. Run the following command to clear the active status of the abnormal logical volume group to ensure that the error message "Can't open /dev/sdc exclusively mounted filesystem" is not displayed: - - ``` - vgchange -a n testvg32947 - ``` - - 2. Run the following command to recreate a physical volume based on the backup file: - - ``` - pvcreate --uuid JT7zlL-K5G4-izjB-3i5L-e94f-7yuX-rhkLjL --restorefile /etc/lvm/backup/testvg32947 /dev/sdc - ``` - - 3. Run the following command to restore the logical volume group information: - - ``` - vgcfgrestore testvg32947 - ``` - - 4. 
Run the following command to reactivate the logical volume group: - - ``` - vgchange -ay testvg32947 - ``` - - -- Run the following commands to clear the logical volume group: - - ``` - vgchange -a n testvg32947 - vgremove -y testvg32947 - ``` - - -## An Exception Occurs During the Selection of the Installation Source - -### Symptom - -After the selection of the installation source, the message "Error checking software selection" is displayed. - -### Possible Cause - -This is because the software package dependency in the installation source is abnormal. - -### Solution - -Check whether the installation source is abnormal. Use the new installation source. - -## How Do I Manually Enable the kdump Service? - -### Symptom - -Run the **systemctl status kdump** command. The following information is displayed, indicating that no memory is reserved. - -![](./figures/en-us_image_0229291280.png) - -### Possible Cause - -The kdump service requires the system to reserve memory for running the kdump kernel. However, the system does not reserve memory for the kdump service. As a result, the kdump service cannot be started. - -### Solution - -For the scenario where the OS has been installed - -1. Add **crashkernel=1024M,high** to **/boot/efi/EFI/openEuler/grub.cfg**. -2. Restart the system for configuration to take effect. -3. Run the following command to check the kdump status: - - ``` - systemctl status kdump - ``` - - If the following information is displayed, the kdump status is **active**, indicating that the kdump service is enabled. No further action is required. - - ![](./figures/en-us_image_0229291272.png) - - -### Parameter Description - -The following table describes the parameters of the memory reserved for the kdump kernel. - -**Table 1** crashkernel parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Kernel Boot Parameter

-

Description

-

Default Value

-

Remarks

-

crashkernel=X

-

Reserve X of the physical memory for kdump when the physical memory is less than 4 GB.

-

None. You can adjust the value as required.

-

This configuration method is used only when the memory is less than 4 GB. Ensure that the continuous available memory is sufficient.

-

crashkernel=X@Y

-

Reserve X of the memory at the start address Y for kdump.

-

None. You can adjust the value as required.

-

Ensure that the X of the memory at the start address Y is not reserved for other modules.

-

crashkernel=X,high

-

Reserve 256 MB of the physical memory for kdump when the physical memory is less than 4 GB, and X of the physical memory for kdump when the physical memory is greater than or equal to 4 GB.

-

None. You can adjust the value based as required. The recommended value is 1024M,high.

-

Ensure that 256 MB of the memory is reserved for continuous use when the physical memory is less than 4 GB and X of the memory is reserved when the physical memory is greater than or equal to 4 GB. The actual reserved memory size equals 256 MB plus X.

-

crashkernel=X,low

-

crashkernel=Y,high

-

Reserve X of the physical memory for kdump when the physical memory is less than 4 GB and Y of the physical memory for kdump when the physical memory is greater than or equal to 4 GB.

-

None. You can adjust the value as required.

-

Ensure that X of the memory is reserved for continuous use when the physical memory is less than 4 GB and Y of the memory is reserved when the physical memory is greater than or equal to 4 GB. The actual reserved memory size equals X plus Y.

-
- -## Failed to Selected Only One Disk for Reinstallation When openEuler Was Installed on a Logical Volume Consisting of Multiple Disks - -### Symptom - -If openEuler was installed on a logical volume consisting of multiple disks, an error message will be displayed as shown in [Figure 1](#fig115949762617) when you attempt to select one of the disks for reinstallation. - -**Figure 1** Error message -![](./figures/error-message.png "error-message") - -### Possible Cause - -The previous logical volume contains multiple disks. If you select one of the disks for reinstallation, the logical volume will be damaged. - -### Solution - -The logical volume formed by multiple disks is equivalent to a volume group. Therefore, you only need to delete the corresponding volume group. - -1. Press **Ctrl**+**Alt**+**F2** to switch to the CLI and run the following command to find the volume group: - - ``` - vgs - ``` - - ![](./figures/en-us_image_0231657950.png) - -2. Run the following command to delete the volume group: - - ``` - vgremove euleros - ``` - -3. Run the following command to restart the installation program for the modification to take effect: - - ``` - systemctl restart anaconda - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >You can also press **Ctrl**+**Alt**+**F6** to return to the GUI and click **Refresh** in the lower right corner to refresh the storage configuration. - - -## Failed to Install openEuler on an x86 PM in UEFI Mode due to Secure Boot Option Setting - -### Symptom - -During the installation of openEuler on an x86 PM in UEFI mode, the system stays at the "No bootable device" page and the installation cannot continue because **secure boot** is set to **enabled** \(by default, it is set to **disabled**\), as shown in [Figure 2](#fig115949762617). 
- -**Figure 2** Dialog box showing "No bootable device" -![](./figures/dialog-box-showing-no-bootable-device.png "dialog-box-showing-no-bootable-device") - -### Possible Cause - -After **secure boot** is set to **enabled**, the mainboard verifies the boot program and OS. If the boot program and OS are not signed using the corresponding private key, the boot program and OS cannot pass the authentication of the built-in public key on the mainboard. - -### Solution - -Access the BIOS, set **secure boot** to **disabled**, and reinstall the openEuler. - -1. During the system startup, press **F11** and enter the password **Admin@9000** to access the BIOS. - - ![](./figures/bios.png) - -2. Choose **Administer Secure Boot**. - - ![](./figures/security.png) - -3. Set **Enforce Secure Boot** to **Disabled**. - - ![](./figures/enforce-secure-boot.png) - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >After **Enforce Secure Boot** is set to **Disabled**, save the settings, and exit. Then, reinstall the system. - -## The Installation Fails when a User Selects Two Drives on Which the OS Has Been Installed and Customizes Partitioning. - -### Symptom - -During the OS installation, the OS has been installed on two drives. In this case, if you select one drive for customized partitioning, and click **Cancel** to perform customized partitioning on the other drive, the installation fails. - -![](./figures/cancle_disk.png) - -![](./figures/custom_paratition.png) - -### Cause Analysis - -A user selects a drive for partitioning. After the user clicks **Cancel** and then selects the other drive, the drive information is incorrect. As a result, the installation fails. - -### Solution - -Select the target drive for customized partitioning. Do not frequently cancel the operation. If you have to cancel and select another drive, you are advised to reinstall the OS. 
- -### Learn More About the Issue at: - -https://gitee.com/src-openeuler/anaconda/issues/I29P84?from=project-issue \ No newline at end of file diff --git a/docs/en/docs/Installation/figures/Installation_source.png b/docs/en/docs/Installation/figures/Installation_source.png deleted file mode 100644 index 558374e3260e5218b6528ddd8d021606bf790787..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/Installation_source.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/Installation_wizard.png b/docs/en/docs/Installation/figures/Installation_wizard.png deleted file mode 100644 index 350936cd80af6661f39716dc0c9a49f281600c62..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/Installation_wizard.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/advanced-user-configuration.png b/docs/en/docs/Installation/figures/advanced-user-configuration.png deleted file mode 100644 index 59a188aece92ad19cc9b42f69e235d9a9d4f702a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/advanced-user-configuration.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/bios.png b/docs/en/docs/Installation/figures/bios.png deleted file mode 100644 index d5a96738001c5a910174c030af583bb09ff29ce6..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/bios.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/cancle_disk.png b/docs/en/docs/Installation/figures/cancle_disk.png deleted file mode 100644 index f1db0f2c524695303f0d8791fcb3c256c75507ad..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/cancle_disk.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/completing-the-automatic-installation.png b/docs/en/docs/Installation/figures/completing-the-automatic-installation.png deleted file mode 100644 index 
f2169685ef202bae133ae74fec620ec64aea46df..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/completing-the-automatic-installation.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/confignetwork.png b/docs/en/docs/Installation/figures/confignetwork.png deleted file mode 100644 index 79903b72948a06d3fceff97c11f49d12f7571b94..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/confignetwork.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/creating-a-user.png b/docs/en/docs/Installation/figures/creating-a-user.png deleted file mode 100644 index 0e2befb0832d1167f5ffdcafdf7d9952d9ccdfbe..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/creating-a-user.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/custom_paratition.png b/docs/en/docs/Installation/figures/custom_paratition.png deleted file mode 100644 index d2e8c68e6af866e96bf5dd2a2f532de81c59a9d9..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/custom_paratition.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/dialog-box-showing-no-bootable-device.png b/docs/en/docs/Installation/figures/dialog-box-showing-no-bootable-device.png deleted file mode 100644 index 944c658d621f00b18e4aa75eaca420d76c08715c..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/dialog-box-showing-no-bootable-device.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/disk-encryption-password.png b/docs/en/docs/Installation/figures/disk-encryption-password.png deleted file mode 100644 index ba84e060133644910ff199376e11d2929cfe8d47..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/disk-encryption-password.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/drive-icon.png 
b/docs/en/docs/Installation/figures/drive-icon.png deleted file mode 100644 index b41fcb09dfbf805da4863142855e7c2de4bf4c7b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/drive-icon.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291229.png b/docs/en/docs/Installation/figures/en-us_image_0229291229.png deleted file mode 100644 index b315531ca7f99d2a045b7933351af96cadc1ad77..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291229.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291236.png b/docs/en/docs/Installation/figures/en-us_image_0229291236.png deleted file mode 100644 index bf466a3d751df4a4c6fd99aecf620ec9adf540a3..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291236.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291243.png b/docs/en/docs/Installation/figures/en-us_image_0229291243.png deleted file mode 100644 index 2418510f855facae4b47129840894490a1eac7ca..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291243.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291247.png b/docs/en/docs/Installation/figures/en-us_image_0229291247.png deleted file mode 100644 index d67b599b9ab74017c0800529053befed3efab8a7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291247.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291264.jpg b/docs/en/docs/Installation/figures/en-us_image_0229291264.jpg deleted file mode 100644 index 3f0a0658e08010f4f453e558a41e31257783b416..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291264.jpg and /dev/null differ diff 
--git a/docs/en/docs/Installation/figures/en-us_image_0229291270.png b/docs/en/docs/Installation/figures/en-us_image_0229291270.png deleted file mode 100644 index deefef68670d64c131e4c41911a01236158f1dd1..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291270.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291272.png b/docs/en/docs/Installation/figures/en-us_image_0229291272.png deleted file mode 100644 index e0ad8102bddd886c3bd7a306b088e8a52e2b99c9..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291272.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291280.png b/docs/en/docs/Installation/figures/en-us_image_0229291280.png deleted file mode 100644 index 5754e734c48b23ace2a4fbf1302b820077cd7b71..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291280.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229291286.png b/docs/en/docs/Installation/figures/en-us_image_0229291286.png deleted file mode 100644 index 4ffcb081e2c8f82bcc49a65a939f2cd8bd6f949b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229291286.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0229420473.png b/docs/en/docs/Installation/figures/en-us_image_0229420473.png deleted file mode 100644 index 86c61a4b8e2a5795baff2fc74629924d01d7b97b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/en-us_image_0229420473.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/en-us_image_0231657950.png b/docs/en/docs/Installation/figures/en-us_image_0231657950.png deleted file mode 100644 index bea985ef710c57aeba16600067304b1005ad92e8..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/Installation/figures/en-us_image_0231657950.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/enforce-secure-boot.png b/docs/en/docs/Installation/figures/enforce-secure-boot.png deleted file mode 100644 index 0e40f5fd8d73dbcbad6bdcec5d56d3883d54023a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/enforce-secure-boot.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/error-message.png b/docs/en/docs/Installation/figures/error-message.png deleted file mode 100644 index c5802a2b7a750eed8429ec06c7e4919a3d161a9e..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/error-message.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/host_env8.png b/docs/en/docs/Installation/figures/host_env8.png deleted file mode 100644 index d08dcc89f40e1671a55a42fbcb02f26e987a461e..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/host_env8.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/image-dialog-box.png b/docs/en/docs/Installation/figures/image-dialog-box.png deleted file mode 100644 index caeb56bb46f766dd39d66a65e308c591954d32cf..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/image-dialog-box.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/installation-process.png b/docs/en/docs/Installation/figures/installation-process.png deleted file mode 100644 index 2d219c7605ee75e73dffba1e2dd7c277968d4801..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/installation-process.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/installation-summary.png b/docs/en/docs/Installation/figures/installation-summary.png deleted file mode 100644 index d5ca555a2b2291e139b67098a7c23d29b23b8b24..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/Installation/figures/installation-summary.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/installsourceen.png b/docs/en/docs/Installation/figures/installsourceen.png deleted file mode 100644 index 43e59b694ec1afcf8591e8272390da927da9a3fe..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/installsourceen.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/manual-partitioning-page.png b/docs/en/docs/Installation/figures/manual-partitioning-page.png deleted file mode 100644 index 6d61aa6c698b9dbc01255d7d646548e636482872..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/manual-partitioning-page.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/password-of-the-root-account.png b/docs/en/docs/Installation/figures/password-of-the-root-account.png deleted file mode 100644 index fe65e73a81e25e5fa90a13af707165911e7fa459..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/password-of-the-root-account.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/restart-icon.png b/docs/en/docs/Installation/figures/restart-icon.png deleted file mode 100644 index a1b02b2dff42c90845d2491192507ea6967352e3..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/restart-icon.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/security.png b/docs/en/docs/Installation/figures/security.png deleted file mode 100644 index 59ac7bfcef796fc32d0127a9d6095d32cb282fb2..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/security.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/selecting-a-language.png b/docs/en/docs/Installation/figures/selecting-a-language.png deleted file mode 100644 index 930bec7d3822a9e8289ee444a9fe6ee2dfdbad6c..0000000000000000000000000000000000000000 
Binary files a/docs/en/docs/Installation/figures/selecting-a-language.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/selecting-installation-software.png b/docs/en/docs/Installation/figures/selecting-installation-software.png deleted file mode 100644 index c246e997d787d0d6a0439dcaf8780a09a9b72ca7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/selecting-installation-software.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/setting-a-system-language.png b/docs/en/docs/Installation/figures/setting-a-system-language.png deleted file mode 100644 index e8e6faa69580e707657cba3f2f589918321a4b4d..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/setting-a-system-language.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/setting-date-and-time.png b/docs/en/docs/Installation/figures/setting-date-and-time.png deleted file mode 100644 index 6e366072db2ca698ae2bc317a361e9d38877a2d0..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/setting-date-and-time.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/setting-the-installation-destination.png b/docs/en/docs/Installation/figures/setting-the-installation-destination.png deleted file mode 100644 index 224f165b222598aa140187bdfa9b1e75af36c0c5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/setting-the-installation-destination.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/setting-the-keyboard-layout.png b/docs/en/docs/Installation/figures/setting-the-keyboard-layout.png deleted file mode 100644 index 62b0074220b8e2c8ebca37dceecc92e0c2fcdffc..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/setting-the-keyboard-layout.png and /dev/null differ diff --git 
a/docs/en/docs/Installation/figures/setting-the-network-and-host-name.png b/docs/en/docs/Installation/figures/setting-the-network-and-host-name.png deleted file mode 100644 index b17ebdaafeaa2228ddbe0d8135fee3eabdc1cb76..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/setting-the-network-and-host-name.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/sourceftp.png b/docs/en/docs/Installation/figures/sourceftp.png deleted file mode 100644 index 2e18d3f5c6d999c8a637ebed36ccb740a96d8449..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/sourceftp.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/sourcenfs.png b/docs/en/docs/Installation/figures/sourcenfs.png deleted file mode 100644 index 3a4564871319deb546776b2542575ed43f2f2a35..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/sourcenfs.png and /dev/null differ diff --git a/docs/en/docs/Installation/figures/startparam.png b/docs/en/docs/Installation/figures/startparam.png deleted file mode 100644 index b197f4d492213513edf84a99cdb14f186630a828..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/figures/startparam.png and /dev/null differ diff --git a/docs/en/docs/Installation/installation-guideline.md b/docs/en/docs/Installation/installation-guideline.md deleted file mode 100644 index 6942d2700c6993ca96244770ca7d1e7a93615ed7..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/installation-guideline.md +++ /dev/null @@ -1,342 +0,0 @@ -# Installation Guideline - -This section describes how to install openEuler using a CD-ROM. The installation process is the same for other installation modes except the boot option. 
- - - -- [Installation Guideline](#installation-guideline) - - [Starting the Installation](#starting-the-installation) - - [Booting from the CD/DVD-ROM Drive](#booting-from-the-cddvd-rom-drive) - - [Installation Wizard](#installation-wizard) - - [Installation in GUI Mode](#installation-in-gui-mode) - - [Configuring an Installation Program Language](#configuring-an-installation-program-language) - - [Entering the Installation Page](#entering-the-installation-page) - - [Setting the Keyboard Layout](#setting-the-keyboard-layout) - - [Setting a System Language](#setting-a-system-language) - - [Setting Date and Time](#setting-date-and-time) - - [Setting the Installation Source](#setting-the-installation-source) - - [Selecting Installation Software](#selecting-installation-software) - - [Setting the Installation Destination](#setting-the-installation-destination) - - [Storage Configuration](#storage-configuration) - - [Setting the Network and Host Name](#setting-the-network-and-host-name) - - [Setting the Root Password](#设置根密码) - - [Password Complexity](#密码复杂度) - - [Creating a User](#创建用户) - - [Starting the Installation](#开始安装) - - [Installation Procedure](#安装过程) - - [Completing the Installation](#安装完成) - - -## Starting the Installation - -### Booting from the CD/DVD-ROM Drive - -Load the ISO image of openEuler from the CD/DVD-ROM drive of the server and restart the server. The procedure is as follows: - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -> Before the installation, ensure that the server boots from the CD/DVD-ROM drive preferentially. The following steps describe how to install the openEuler using the virtual CD/DVD-ROM drive on the baseboard management controller (BMC). Installing the openEuler from a physical drive is simple. After the installation starts, the procedure for the physical drive is the same as that of the virtual drive. - -1. On the toolbar, click the icon shown in the following figure. 
- - **Figure 1** Drive icon - ![](./figures/drive-icon.png "drive-icon") - - An image dialog box is displayed, as shown in the following figure. - - **Figure 2** Image dialog box - ![](./figures/image-dialog-box.png "image-dialog-box") - -2. Select **Image File** and then click **Browse**. The **Open** dialog box is displayed. - -3. Select the image file and click **Open**. In the image dialog box, click **Connect**. If **Connect** changes to **Disconnect**, the virtual CD/DVD-ROM drive is connected to the server. - -4. On the toolbar, click the restart icon shown in the following figure to restart the device. - - **Figure 3** Restart icon - ![](./figures/restart-icon.png "restart-icon") - - -### Installation Wizard - -A boot menu is displayed after the system is booted using the boot medium. In addition to options for starting the installation program, some other options are available on the boot menu. During system installation, the **Test this media \& install openEuler 21.03** mode is used by default. Press the arrow keys on the keyboard to change the selection, and press **Enter** when the desired option is highlighted. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> ->- If you do not perform any operations within 1 minute, the system automatically selects the default option **Test this media \& install openEuler 21.03** and enters the installation page. ->- During PM installation, if you cannot use the arrow keys to select boot options and the system does not respond after you press **Enter**, click ![](./figures/en-us_image_0229420473.png) on the BMC page and configure **Key & Mouse Reset**. - -**Figure 4** Installation Wizard -![](./figures/Installation_wizard.png "installation-wizard") - -Installation wizard options are described as follows: - -- **Install openEuler 21.03**: Install openEuler on your server in GUI mode. - -- **Test this media & install openEuler 21.03**: Default option. Install openEuler on your server in GUI mode. 
The integrity of the installation medium is checked before the installation program is started. - -- **Troubleshooting**: Troubleshooting mode, which is used when the system cannot be installed properly. In troubleshooting mode, the following options are available: - - - **Install openEuler 21.03 in basic graphics mode**: Basic graphics installation mode. In this mode, the video driver is not started before the system starts and runs. - - **Rescue the openEuler system**: Rescue mode, which is used to restore the system. In rescue mode, the installation process is printed in the VNC or BMC, and the serial port is unavailable. - -On the installation wizard screen, press **e** to go to the parameter editing screen of the selected option, and press **c** to go to the command-line interface (CLI). - -### Installation in GUI Mode - -On the installation wizard page, select **Test this media \& install openEuler 21.03** to enter the GUI installation mode. - -Perform graphical installation operations using a keyboard. - -- Press **Tab** or **Shift**+**Tab** to move between GUI controls (such as buttons, area boxes, and check boxes). -- Press the up or down arrow key to move a target in the list. -- Press the left or right arrow key to move between the horizontal toolbar and watch bar. -- Press the spacebar or **Enter** to select or delete highlighted options, expand or collapse a drop-down list. -- Press **Alt**+a shortcut key (the shortcut key varies for different pages) to select the control where the shortcut key is located. The shortcut key can be highlighted (underlined) by holding down Alt. - - -## Configuring an Installation Program Language - -After the installation starts, the system will prompt the language that is used during the configuration installation process. English is configured by default, as shown in [Figure 5](#en-us_topic_0186390093_en-us_topic_0122145864_fig144630179151). Configure another language as required. 
- -**Figure 5** Selecting a language -![](./figures/selecting-a-language.png "selecting-a-language") - -After configurations, click **Continue**. The main installation configuration page is displayed. - -If you want to exit the installation, click **Exit**. The message "Are you sure you want to exit the installation program?" is displayed. Click **Yes** in the dialog box to go back to the installation wizard page. - -## Entering the Installation Page - -After the installation program starts, the main installation configuration page is displayed, as shown in [Figure 6](#en-us_topic_0186390094_en-us_topic_0122145883_fig5969171592212). On the page, you can configure the time, language, installation source, network, and storage device. - -Some configuration items are matched with safety symbols. A safety symbol will disappear after the item is configured. Start the installation only when all the safety symbols disappear from the page. - -If you want to exit the installation, click **Exit**. The message "Are you sure you want to exit the installation program?" is displayed. Click **Yes** in the dialog box to go back to the installation wizard page. - -**Figure 6** Installation summary -![](./figures/installation-summary.png "installation-summary") - -## Setting the Keyboard Layout - -On the **INSTALLATION SUMMARY** page, click **KEYBOARD**. You can add or delete multiple keyboard layouts in the system. - -- On the left white box, click to select the keyboard layout and click the keyboard under the box. -- To test the keyboard layout: On the left white box, click to select the keyboard layout, click the inside of the right text box, and enter the text to ensure that the keyboard layout can work properly. - -**Figure 7** Setting the keyboard layout -![](./figures/setting-the-keyboard-layout.png "setting-the-keyboard-layout") - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. 
- -## Setting a System Language - -On the **INSTALLATION SUMMARY** page, click **LANGUAGE SUPPORT** to set the system language, as shown in [Figure 8](#en-us_topic_0186390098_en-us_topic_0122145772_fig187301927172619). Set another language as required, such as Chinese. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> -> - If you select **Chinese**, the system does not support the display of Chinese characters when you log in to the system using VNC, but supports the display of Chinese characters when you log in to the system using a serial port. When you log in to the system using SSH, whether the system supports the display of Chinese characters depends on the SSH client. If you select **English**, the display is not affected. - -**Figure 8** Setting a system language -![](./figures/setting-a-system-language.png "setting-a-system-language") - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - -## Setting Date and Time - -On the **INSTALLATION SUMMARY** page, click **TIME \& DATE**. On the **TIME \& DATE** page, set the system time zone, date, and time. - -When setting the time zone, you can click a specific city on the map with the mouse, or select a region from the drop-down list of **Region** or a city from the drop-down list of **City** at the top of the page, as shown in [Figure 9](#en-us_topic_0186390096_en-us_topic_0122145900_fig1260162652312). - -If your city is not displayed on the map or in the drop-down list, select the nearest city in the same time zone. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> ->- Before manually setting the time zone, disable the network time synchronization function in the upper right corner. ->- If you want to use the network time, ensure that the network can connect to the remote NTP server. For details about how to set the network, see [Setting the Network and Host Name](#setting-the-network-and-host-name). 
- -**Figure 9** Setting date and time -![](./figures/setting-date-and-time.png "setting-date-and-time") - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - -## Setting the Installation Source - -On the **INSTALLATION SUMMARY** page, click **INSTALLATION SOURCE** to locate the installation source. - -* When you use the complete CD-ROM for installation, the installation program automatically detects and displays the installation source information. You can use the default settings, as shown in [Figure 10](#zh-cn_topic_0186390100_zh-cn_topic_0144427079_fig93633295132): - - **Figure 10** Installation source -![](./figures/Installation_source.png) - -* When the network source is used for installation, you need to set the URL of the network source. - - - HTTP or HTTPS mode - - The following figure shows the installation source in HTTP or HTTPS mode: - - ![](./figures/installsourceen.png) - - If the HTTPS server uses a private certificate, press **e** on the installation wizard page to go to the parameter editing page of the selected option, and add the **inst.noverifyssl** parameter. - - Enter the actual installation source address, for example, **https://repo.openeuler.org/openEuler-21.03/OS/aarch64**, in the text box. **openEuler-21.03** indicates the version number, and **aarch64** indicates the CPU architecture. - - - FTP mode - - The following figure shows the installation source in FTP mode. Enter the FTP address in the text box. - - ![](./figures/sourceftp.png) - - You need to set up an FTP server, mount the **openEuler-21.03-aarch64-dvd.iso** image, and copy the mounted files to the shared directory on the FTP server. **aarch64** indicates the CPU architecture. You can use images as required. - - - NFS mode - - The following figure shows the installation source in NFS mode. Enter the NFS address in the text box. 
- - ![](./figures/sourcenfs.png) - - You need to set up an NFS server, mount the **openEuler-21.03-aarch64-dvd.iso** image, and copy the mounted file to the shared directory on the NFS server. **aarch64** indicates the CPU architecture. You can use images as required. - -During the installation, if you have any questions about configuring the installation source, see [An Exception Occurs During the Selection of the Installation Source](./faqs.html#an-exception-occurs-during-the-selection-of-the-installation-source). - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - -## Selecting Installation Software - -On the **INSTALLATION SUMMARY** page, click **SOFTWARE SELECTION** to specify the software package to be installed. - -Based on the actual requirements, select **Minimal Install** on the left box and select an add-on in the **Add-Ons for Selected Environment** area on the right, as shown in [Figure 11](#en-us_topic_0186390261_en-us_topic_0122145865_fig03031519101414). - -**Figure 11** Selecting installation software -![](./figures/selecting-installation-software.png "selecting-installation-software") - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** ->- In **Minimal Install** mode, not all packages in the installation source will be installed. If the required package is not installed, you can mount the installation source to the local PC and configure a repo source, and use DNF to install the package. ->- If you select **Virtual Host**, the virtualization components QEMU, libvirt, and edk2 are installed by default. You can select whether to install the OVS component in the add-on area. - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - -## Setting the Installation Destination - -On the **INSTALLATION SUMMARY** page, click **INSTALLATION DESTINATION** to select the OS installation disk and partition. 
- -You can view available local storage devices in [Figure 12](#fig1195417125015). - -> ![](./public_sys-resources/icon-notice.gif) **NOTICE:** -When selecting the device to be installed, you are advised not to use the NVMe SSD storage medium as the OS installation disk. - -**Figure 12** Setting the installation destination -![](./figures/setting-the-installation-destination.png "setting-the-installation-destination") - -### Storage Configuration - -On the **INSTALLATION DESTINATION** page, configure storage for system partition. You can either manually configure partitions or select **Automatic** to automatically configure partitioning. - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -> -> - During partitioning, to ensure system security and performance, you are advised to divide the device into the following partitions: **/boot**, **/var**, **/var/log**, **/var/log/audit**, **/home**, and **/tmp**. -> - If the system is configured with the swap partition, the swap partition is used when the physical memory of the system is insufficient. Although the swap partition can be used to expand the physical memory, if the swap partition is used due to insufficient memory, the system response slows and the system performance deteriorates. Therefore, you are not advised to configure the swap partition in the system with sufficient physical memory or the performance sensitive system. -> - If you need to split a logical volume group, select **Custom** to manually partition the logical volume group. On the **MANUAL PARTITIONING** page, click **Modify** in the **Volume Group** area to reconfigure the logical volume group. - -**Automatic** - -Select **Automatic** if the software is installed in a new storage device or the data in the storage device is not required. After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. 
- -**Customize** - -If you need to manually partition the disk, click **Customize** and click **Done** in the upper left corner. The following page is displayed. - -On the **MANUAL PARTITIONING** page, you can partition the disk in either of the following ways. After the partitioning is completed, the window shown in [Figure 14](#fig1277151815248) is displayed. - -- Automatic creation: Click **Click here to create them automatically**. The system automatically assigns four mount points according to the available storage space: **/boot**, **/**, **/boot/efi**, and **swap**. -- Manual creation: Click ![](./figures/en-us_image_0229291243.png) to add a mount point. It is recommended that the expected capacity of each mount point not exceed the available space. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** -If the expected capacity of the mount point exceeds the available space, the system allocates the remaining available space to the mount point. - -**Figure 13** MANUAL PARTITIONING page -![](./figures/manual-partitioning-page.png "manual-partitioning-page") - -After the setting is complete, click **Done** in the upper left corner to go back to the **SUMMARY OF CHANGES** page. -click **Accept Changes** to go back to the **INSTALLATION SUMMARY** page. - -## Setting the Network and Host Name - -On the **INSTALLATION SUMMARY** page, select **NETWORK \& HOST NAME** to configure the system network functions. - -The installation program automatically detects a local access interface. The detected interface is listed in the left box, and the interface details are displayed in the right-hand area, as shown in [Figure 14](#en-us_topic_0186390264_en-us_topic_0122145831_fig123700157297). You can enable or disable a network interface by clicking the switch in the upper right corner of the page. The switch is turned off by default. If the installation source is set to be the network, turn on the switch. 
You can also click **Configure** to configure the selected interface. Select **Connect automatically with priority** to enable the NIC to start automatically upon system startup, as shown in Figure 16. - -In the lower left box, enter the host name. The host name can be the fully qualified domain name (FQDN) in the format of hostname.domainname or the brief host name in the format of hostname. - -**Figure 14** Setting the network and host name -![](./figures/setting-the-network-and-host-name.png "setting-the-network-and-host-name") - -**Figure 15** Configuring the network -![](./figures/confignetwork.png "config-the-network") - -After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - -## Setting the Root Password - -Select **Root Password** on the **INSTALLATION SUMMARY** page. The **Root Password** page is displayed, as shown in [Figure 15](#zh-cn_topic_0186390266_zh-cn_topic_0122145909_fig1323165793018). Enter a password based on [Password Complexity](#password-complexity) requirements and confirm the password. - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -> -> - The root account is used to perform key system management tasks. You are not advised to use the root account for daily work or system access. -> -> - If you select **Lock root account** on the **Root Password** page, the root account will be disabled. - -**Figure 15** Root password -![](./figures/password-of-the-root-account.png "Root password") - -### Password Complexity - -The password of the **root** user or the password of the new user must meet the password complexity requirements. Otherwise, the password configuration or user creation will fail. The password complexity requirements are as follows: - -1. A password must contain at least eight characters. - -2. A password must contain at least three of the following types: uppercase letters, lowercase letters, digits, and special characters. - -3. 
A password must be different from the account name. - -4. A password cannot contain words in the dictionary. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** -In the installed openEuler environment, you can run the `cracklib-unpacker /usr/share/cracklib/pw_dict > dictionary.txt` command to export the dictionary library file **dictionary.txt**, and then check whether the password is in the dictionary. - -After the settings are completed, click **Done** in the upper left corner to return to the **INSTALLATION SUMMARY** page. - -## Creating a User - -Click **User Creation**. [Figure 16](#en-us_topic_0186390266_en-us_topic_0122145909_fig1237715313319) shows the page for creating a user. Enter a username and set a password. By clicking **Advanced**, you can also configure a home directory and a user group, as shown in [Figure 17](#en-us_topic_0186390266_en-us_topic_0122145909_fig128716531312). - -**Figure 16** Creating a user -![](./figures/creating-a-user.png "creating-a-user") - -**Figure 17** Advanced user configuration -![](./figures/advanced-user-configuration.png "advanced-user-configuration") - -After configuration, click **Done** in the left-upper corner to switch back to the installation process page. - -## Starting Installation - -On the installation page, after all the mandatory items are configured, the safety symbols will disappear. Then, you can click **Begin Installation** to install the system. - -## Installation Procedure - -After the installation starts, the overall installation progress and the progress of writing the software package to the system are displayed. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** -If you click **Exit** or reset or power off the server during the installation, the installation is interrupted and the system is unavailable. In this case, you need to reinstall the system. 
- -**Figure 18** Installation process -![](./figures/installation-process.png "installation-process") - -## Completing the Installation - -openEuler has been installed. Click **Reboot** to restart the system. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> - If the physical DVD-ROM is used to install the OS and the DVD-ROM drive is not automatically ejected during the restart, manually remove the DVD-ROM. Then, the openEuler CLI login page is displayed. -> - If the virtual DVD-ROM drive is used to install the OS, change the server boot option to **Hard Disk** and restart the server. Then, the openEuler CLI login page is displayed. - - diff --git a/docs/en/docs/Installation/installation-mode.md b/docs/en/docs/Installation/installation-mode.md deleted file mode 100644 index b043a6ada2863c059a603a7797fe38b497400934..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/installation-mode.md +++ /dev/null @@ -1,206 +0,0 @@ -# Installation Mode - ->![](./public_sys-resources/icon-notice.gif) **NOTICE:** ->- Only TaiShan 200 and FusionServer Pro servers are supported. For details about the supported server models, see [Hardware Compatibility](./installation-preparations.html#hardware-compatibility). Only a virtualization platform created by the virtualization components \(openEuler as the host OS and QEMU and KVM provided in the release package\) of openEuler and the x86 virtualization platform of Huawei public cloud are supported. ->- Currently, only installation modes such as CD-ROM, USB flash drive, network, QCOW2 image, and private image are supported. In addition, only the x86 virtualization platform of Huawei public cloud supports the private image installation mode. 
- - - -- [Installation Mode](#installation-mode) - - [Installation Through a CD/DVD-ROM](#installation-through-a-cddvd-rom) - - [Preparing the Installation Source](#preparing-the-installation-source) - - [Starting the Installation](#starting-the-installation) - - [Installation Through a USB Flash Drive](#installation-through-a-usb-flash-drive) - - [Preparing the Installation Source](#preparing-the-installation-source-1) - - [Starting the Installation](#starting-the-installation-1) - - [Installation Through the Network Using PXE](#installation-through-the-network-using-pxe) - - [Installation Through a QCOW2 Image](#installation-through-a-qcow2-image) - - [Creating a QCOW2 Image](#creating-a-qcow2-image) - - [Starting the Installation](#starting-the-installation-2) - - [Installation Through a Private Image](#installation-through-a-private-image) - - [Creating a Private Image](#creating-a-private-image) - - [Starting the Installation](#starting-the-installation-3) - - - -## Installation Through a CD/DVD-ROM - -This section describes how to create or use a CD/DVD-ROM to install the openEuler. - -### Preparing the Installation Source - -If you have obtained a CD/DVD-ROM, install the OS using the CD/DVD-ROM. If you have obtained an ISO file, record the ISO file to a DVD and install the OS using the obtained DVD. - -### Starting the Installation - -Perform the following operations to start the installation: - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Set the system to preferentially boot from the CD/DVD-ROM drive. Take the BIOS as an example. You need to move the **CD/DVD-ROM Drive** option under **Boot Type Order** to the top. - -1. Disconnect all drives that are not required, such as USB drives. -2. Start your computer system. -3. Insert the installation CD/DVD-ROM into the CD/DVD-ROM drive. -4. Restart the computer system. - -After a short delay, a graphical wizard page is displayed, which contains different boot options. 
If you do not perform any operation within one minute, the installation starts automatically with the default options. - -## Installation Through a USB Flash Drive - -This section describes how to create or use a USB flash drive to install the openEuler. - -### Preparing the Installation Source - -Pay attention to the capacity of the USB flash drive. The USB flash drive must have sufficient space to store the entire image. It is recommended that the USB flash drive has more than 16 GB space. - -1. Connect the USB flash drive to the system and run the **dmesg** command to view related log. At the end of the log, you can view the information generated by the USB flash drive that is just connected. The information is similar to the following: - - ``` - [ 170.171135] sd 5:0:0:0: [sdb] Attached SCSI removable disk - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Take the **sdb** USB flash drive as an example. - -2. Switch to user **root**. When running the **su** command, you need to enter the password. - - ``` - $ su - root - ``` - -3. Ensure that the USB flash drive is not mounted. Run the following command: - - ``` - # findmnt /dev/sdb - ``` - - - If no command output is displayed, the file system is not mounted. Go to the next step. - - - If the following information is displayed, the USB flash drive is automatically mounted. - - ``` - # findmnt /dev/sdb - TARGET SOURCE FSTYPE OPTIONS - /mnt/iso /dev/sdb iso9660 ro,relatime - ``` - - In this case, you need to run the **umount** command to uninstall the device. - - ``` - # umount /mnt/iso - ``` - -4. Run the **dd** command to write the ISO image to the USB flash drive. 
- - ``` - # dd if=/path/to/image.iso of=/dev/device bs=blocksize - ``` - - Replace **/path/to/image.iso** with the complete path of the downloaded ISO image file, replace **device** with the device name provided by the **dmesg** command, and set a proper block size \(for example, 512 KB\) to replace **blocksize** to accelerate the write progress. - - For example, if the ISO image file name is **/home/testuser/Downloads/openEuler-21.03-aarch64-dvd.iso** and the detected device name is **sdb**, run the following command: - - ``` - # dd if=/home/testuser/Downloads/openEuler-21.03-aarch64-dvd.iso of=/dev/sdb bs=512k - ``` - -5. After the image is written, remove the USB flash drive. - - No progress is displayed during the image write process. When the number sign \(\#\) appears again, the write is complete. Exit the **root** account and remove the USB flash drive. In this case, you can use the USB flash drive as the installation source of the system. - - -### Starting the Installation - -Perform the following operations to start the installation: - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Set the system to preferentially boot from the USB flash drive. Take the BIOS as an example. You need to move the **USB** option under **Boot Type Order** to the top. - -1. Disconnect all drives that are not required. -2. Open your computer system. -3. Insert the USB flash drive into the computer. -4. Restart the computer system. - -After a short delay, a graphical wizard page is displayed, which contains different boot options. If you do not perform any operation within one minute, the installation program automatically starts the installation. - -## Installation Through the Network Using PXE - -To boot with PXE, you need to properly configure the server and your computer's network interface to support PXE. 
- -If the target hardware is installed with a PXE-enabled NIC, we can configure it to boot the computer from network system files rather than local media \(such as CD-ROMs\) and execute the Anaconda installation program. - -For installation through the network using PXE, the client uses a PXE-enabled NIC to send a broadcast request for DHCP information and IP address to the network. The DHCP server provides the client with an IP address and other network information, such as the IP address or host name of the DNS and FTP server \(which provides the files required for starting the installation program\), and the location of the files on the server. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The TFTP, DHCP, and HTTP server configurations are not described here. For details, see [Full-automatic Installation Guide](./using-kickstart-for-automatic-installation.html#full-automatic-installation-guide). - -## Installation Through a QCOW2 Image - -This section describes how to create or use a QCOW2 image to install the openEuler. - -### Creating a QCOW2 Image - -1. Install the **qemu-img** software package. - - ``` - # dnf install -y qemu-img - ``` - -2. Run the **create** command of the qemu-img tool to create an image file. The command format is as follows: - - ``` - $ qemu-img create -f <imgFormat> -o <fileOption> <fileName> <diskSize> - ``` - - The parameters are described as follows: - - - _imgFormat_: Image format. The value can be **raw** or **qcow2**. - - _fileOption_: File option, which is used to set features of an image file, such as specifying a backend image file, compression, and encryption. - - _fileName_: File name. - - _diskSize_: Disk size, which specifies the size of a block disk. The unit can be K, M, G, or T, indicating KiB, MiB, GiB, or TiB. 
- - For example, to create an image file **openEuler-image.qcow2** whose disk size is 32 GB and format is qcow2, the command and output are as follows: - - ``` - $ qemu-img create -f qcow2 openEuler-image.qcow2 32G - Formatting 'openEuler-image.qcow2', fmt=qcow2 size=34359738368 cluster_size=65536 lazy_refcounts=off refcount_bits=16 - ``` - - -### Starting the Installation - -Perform the following operations to start the installation: - -1. Prepare a QCOW2 image file. -2. Prepare the VM network. -3. Prepare the UEFI boot tool set EDK II. -4. Prepare the VM XML configuration file. -5. Create a VM. -6. Start the VM. - -For details, see the [*Virtualization User Guide*](./../Virtualization/virtualization.html). - -## Installation Through a Private Image - -This section describes how to create or use a private image to install the openEuler. - -### Creating a Private Image - -For instructions about how to create a private image, see [*Image Management Service User Guide*](https://support.huaweicloud.com/intl/en-us/usermanual-ims/en-us_topic_0013901628.html). - -### Starting the Installation - -For details about how to start the x86 virtualization platform of Huawei public cloud, see [Elastic Cloud Server User Guide](https://support.huaweicloud.com/intl/en-us/wtsnew-ims/index.html). - - - - - - - - - - - - - diff --git a/docs/en/docs/Installation/installation-preparations.md b/docs/en/docs/Installation/installation-preparations.md deleted file mode 100644 index c62e2d3e74d05ade6ce6ba1a54b097fcb9da77f8..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/installation-preparations.md +++ /dev/null @@ -1,164 +0,0 @@ -# Installation Preparations - -This section describes the compatibility of the hardware and software and the related configurations and preparations required for the installation. 
- - - -- [Installation Preparations](#installation-preparations) - - [Obtaining the Installation Source](#obtaining-the-installation-source) - - [Release Package Integrity Check](#release-package-integrity-check) - - [Introduction](#introduction) - - [Prerequisites](#prerequisites) - - [Procedure](#procedure) - - [Installation Requirements for PMs](#installation-requirements-for-pms) - - [Hardware Compatibility](#hardware-compatibility) - - [Minimum Hardware Specifications](#minimum-hardware-specifications) - - [Installation Requirements for VMs](#installation-requirements-for-vms) - - [Virtualization Platform Compatibility](#virtualization-platform-compatibility) - - [Minimum Virtualization Space](#minimum-virtualization-space) - - - -## Obtaining the Installation Source - -Obtain the openEuler release package and verification file before the installation. - -Perform the following operations to obtain the openEuler release package: - -1. Log in to the [openEuler Community](https://openeuler.org/zh/) website. -2. Click **Download**. -3. Choose the card **openEuler-21.03**. Click the link provided after **Download ISO**. The download list is displayed. - - **aarch64**: ISO image file of the AArch64 architecture - - **x86\_64**: ISO image file of the x86\_64 architecture - - **source**: ISO image file of the openEuler source code -4. Select the openEuler release package and verification file to be downloaded that adapt to the architecture of the environment to be installed. - - AArch64 architecture: - 1. Click **aarch64**. - 2. If you install the environment on the local host, download the release package **openEuler-21.03-aarch64-dvd.iso** and the verification file **openEuler-21.03-aarch64-dvd.iso.sha256sum** to the local host. - 3. If you install the environment on the network, download the release package **openEuler-21.03-netinst-aarch64-dvd.iso** and the verification file **openEuler-21.03-netinst-aarch64-dvd.iso.sha256sum** to the local host. 
- - - x86_64 architecture: - 1. Click **x86_64**. - 2. If you install the environment on the local host, download the release package **openEuler-21.03-x86_64-dvd.iso** and the verification file **openEuler-21.03-x86_64-dvd.iso.sha256sum** to the local host. - 3. If you install the environment on the network, download the release package **openEuler-21.03-netinst-x86_64-dvd.iso** and the verification file **openEuler-21.03-netinst-x86_64-dvd.iso.sha256sum** to the local host. - ->![](./public_sys-resources/icon-note.gif) **Note** -> When the network is available, install the environment on the network because the ISO release package is small. - -## Release Package Integrity Check - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->This section describes how to verify the integrity of the release package in the AArch64 architecture. The procedure for verifying the integrity of the release package in the x86\_64 architecture is the same. - -### Introduction - -To prevent the software package from being incompletely downloaded due to network or storage device faults during transmission, you need to verify the integrity of the software package after obtaining it. Only the software packages that pass the verification can be installed. - -Compare the verification value recorded in the verification file with the .iso file verification value calculated manually to check whether the software package passes the verification. If the verification values are consistent, the .iso file is not damaged. If they are inconsistent, you can confirm that the file is damaged and you need to obtain the file again. - -### Prerequisites - -Before verifying the integrity of the release package, you need to prepare the following files: - -ISO file: **openEuler-21.03-aarch64-dvd.iso** - -Verification file: **openEuler-21.03-aarch64-dvd.iso.sha256sum** - -### Procedure - -To verify the file integrity, perform the following operations: - -1. 
Obtain the verification value in the verification file. Run the following command: - - ``` - $ cat openEuler-21.03-aarch64-dvd.iso.sha256sum - ``` - -2. Calculate the SHA256 verification value of the file. Run the following command: - - ``` - $ sha256sum openEuler-21.03-aarch64-dvd.iso - ``` - - After the command is run, the verification value is displayed. - -3. Check whether the values calculated in step 1 and step 2 are consistent. - - If the verification values are consistent, the .iso file is not damaged. If they are inconsistent, you can confirm that the file is damaged and you need to obtain the file again. - -## Installation Requirements for PMs - -To install the openEuler OS on a PM, the PM must meet the following hardware compatibility and minimum hardware requirements. - -### Hardware Compatibility - -You need to take hardware compatibility into account during openEuler installation. [Table 1](#table14948632047) describes the types of supported servers. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** -> ->- TaiShan 200 servers are backed by Huawei Kunpeng 920 processors. ->- Currently, only Huawei TaiShan and FusionServer Pro servers are supported. More servers from other vendors will be supported in the future. - -**Table 1** Supported servers - -| Server Type | Server Name | Server Model | -| :---- | :---- | :---- | -| Rack server | TaiShan 200 | 2280 balanced model | -| Rack server | FusionServer Pro | FusionServer Pro 2288H V5
NOTE:
The server must be configured with the Avago SAS3508 RAID controller card and the LOM-X722 NIC.| - -### Minimum Hardware Specifications - -[Table 2](#tff48b99c9bf24b84bb602c53229e2541) lists the minimum hardware specifications supported by openEuler. - -**Table 2** Minimum hardware specifications - -| Component | Minimum Hardware Specifications | -| :---- | :---- | -| Architecture | AArch64 or x86_64 | -| CPU | Two CPUs | -| Memory | ≥ 4 GB (8 GB or higher recommended for better user experience) | -| Hard disk | ≥ 120 GB (for better user experience) | - -## Installation Requirements for VMs - -To install the openEuler OS on a VM, the VM must meet the following hardware compatibility and minimum hardware requirements. - -### Virtualization Platform Compatibility - -When installing openEuler, pay attention to the compatibility of the virtualization platform. Currently, the following virtualization platforms are supported: - -- A virtualization platform created by the virtualization components \(openEuler as the host OS and QEMU and KVM provided in the release package\) of openEuler -- x86 virtualization platform of Huawei public cloud - -### Minimum Virtualization Space - -[Table 3](#tff48b99c9bf24b84bb602c53229e2541) lists the minimum virtualization space required by openEuler. 
- -**Table 3** Minimum virtualization space - -| Component | Minimum Virtualization Space | -| :---- | :---- | -| Architecture | AArch64 or x86_64 | -| CPU | Two CPUs| -| Memory | ≥ 4 GB (8 GB or higher recommended for better user experience) | -| Hard disk | ≥ 32 GB (120 GB or higher recommended for better user experience) | - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en/docs/Installation/public_sys-resources/icon-caution.gif b/docs/en/docs/Installation/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Installation/public_sys-resources/icon-danger.gif b/docs/en/docs/Installation/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Installation/public_sys-resources/icon-note.gif b/docs/en/docs/Installation/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Installation/public_sys-resources/icon-notice.gif b/docs/en/docs/Installation/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Installation/public_sys-resources/icon-tip.gif b/docs/en/docs/Installation/public_sys-resources/icon-tip.gif deleted file mode 100644 index 
93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/Installation/public_sys-resources/icon-warning.gif b/docs/en/docs/Installation/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Installation/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Installation/using-kickstart-for-automatic-installation.md b/docs/en/docs/Installation/using-kickstart-for-automatic-installation.md deleted file mode 100644 index 19aca36dcb9a7a5ac520494b6324b72abaef141c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Installation/using-kickstart-for-automatic-installation.md +++ /dev/null @@ -1,366 +0,0 @@ -# Using Kickstart for Automatic Installation - - -- [Using Kickstart for Automatic Installation](#using-kickstart-for-automatic-installation) - - [Introduction](#introduction) - - [Overview](#overview) - - [Advantages and Disadvantages](#advantages-and-disadvantages) - - [Background](#background) - - [Semi-automatic Installation Guide](#semi-automatic-installation-guide) - - [Environment Requirements](#environment-requirements) - - [Procedure](#procedure) - - [Full-automatic Installation Guide](#full-automatic-installation-guide) - - [Environment Requirements](#environment-requirements-1) - - [Procedure](#procedure-1) - - - -## Introduction - -### Overview - -You can use the kickstart tool to automatically install the openEuler OS in either of the following ways: - -- Semi-automatic installation: You only need to specify the location of the kickstart file. Kickstart automatically configures OS attributes such as keyboard, language, and partitions. -- Automatic installation: The OS is automatically installed. 
- -### Advantages and Disadvantages - -[Table 1](#table1388812373315) lists the advantages and disadvantages of semi-automatic installation and full-automatic installation using kickstart. You can select an installation mode as required. - -**Table 1** Advantages and disadvantages - - - - - - - - - - - - - - - - -

Installation Mode

-

Advantage

-

Disadvantage

-

Semi-automatic installation

-

Services such as TFTP, PXE, and DHCP do not need to be prepared.

-

You need to manually specify the path of the kickstart file.

-

Full-automatic installation

-

The OS is installed automatically.

-

Services, such as TFTP, DHCPD, and PXE, need to be configured.

-
- -### Background - -**Kickstart** - -Kickstart is an unattended installation mode. The principle of kickstart is to record typical parameters that need to be manually entered during the installation and generate the configuration file **ks.cfg**. During the installation, the installation program searches the **ks.cfg** configuration file first for required parameters. If no matching parameters are found, you need to manually configure these parameters. If all required parameters are covered by the kickstart file, automatic installation can be achieved by only specifying the path of the kickstart file. - -Both full-automatic and semi-automatic installation can be achieved by kickstart. - -**PXE** - -Pre-boot Execution Environment \(PXE\) works in client/server network mode. The PXE client can obtain an IP address from the DHCP server during the startup and implement client boot and installation through the network based on protocols such as trivial file transfer protocol \(TFTP\). - -**TFTP** - -TFTP is used to transfer simple and trivial files between clients and the server. - -## Semi-automatic Installation Guide - -### Environment Requirements - -The environment requirements for semi-automatic installation of openEuler using kickstart are as follows: - -- PM/VM \(for details about how to create VMs, see the documents from corresponding vendors\): includes the computer where kickstart is used for automatic installation and the computer where the kickstart tool is installed. -- Httpd: stores the kickstart file. -- ISO: openEuler-21.03-aarch64-dvd.iso - -### Procedure - -To use kickstart to perform semi-automatic installation of openEuler, perform the following steps: - -**Environment Preparation** - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Before the installation, ensure that the firewall of the HTTP server is disabled. Run the following command to disable the firewall: ->``` ->iptables -F ->``` - -1. Install httpd and start the service. 
- - ``` - # dnf install httpd -y - # systemctl start httpd - # systemctl enable httpd - ``` - -2. Run the following commands to prepare the kickstart file: - - ``` - # mkdir /var/www/html/ks - #vim /var/www/html/ks/openEuler-ks.cfg ===>The file can be obtained by modifying the anaconda-ks.cfg file automatically generated from openEuler, or can be created using the system-config-kickstart tool. - ==================================== - ***Modify the following information as required.*** - #version=DEVEL - ignoredisk --only-use=sda - autopart --type=lvm - # Partition clearing information - clearpart --none --initlabel - # Use graphical install - graphical - # Use CDROM installation media - cdrom - # Keyboard layouts - keyboard --vckeymap=cn --xlayouts='cn' - # System language - lang zh_CN.UTF-8 - - # Network information - network --bootproto=dhcp --device=enp4s0 --ipv6=auto --activate - network --hostname=openeuler.com - # Root password - rootpw --iscrypted $6$fQE83lxEZ48Or4zc$j7/PlUMHn29yTjCD4Fi44WTZL/RzVGxJ/7MGsZMl6QfE3KjIVT7M4UrhFXbafvRq2lUddAFcyWHd5WRmXfEK20 - # Run the Setup Agent on first boot - firstboot --enable - # Do not configure the X Window System - skipx - # System services - services --disabled="chronyd" - # System timezone - timezone Asia/Shanghai --isUtc--nontp - - %packages - @^minimal-environment - @standard - - %end - - %anaconda - pwpolicy root --minlen=8 --minquality=1 --notstrict --nochanges --notempty - pwpolicy user --minlen=8 --minquality=1 --notstrict --nochanges --emptyok - pwpolicy luks --minlen=8 --minquality=1 --notstrict --nochanges --notempty - %end - - %post - #enable kdump - sed -i "s/ ro / ro crashkernel=1024M,high /" /boot/efi/EFI/openEuler/grub.cfg - %end - ===================================== - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >The method of generating the password ciphertext is as follows: - >``` - ># python3 - >Python 3.7.0 (default, Apr 1 2019, 00:00:00) - >[GCC 7.3.0] on linux - >Type "help", 
"copyright", "credits" or "license" for more information. - >>>> import crypt - >>>> passwd = crypt.crypt("myPasswd") - >>>> print (passwd) - >$6$63c4tDmQGn5SDayV$mZoZC4pa9Jdt6/ALgaaDq6mIExiOO2EjzomB.Rf6V1BkEMJDcMddZeGdp17cMyc9l9ML9ldthytBEPVcnboR/0 - >``` - -3. Mount the ISO image file to the CD-ROM drive of the computer where openEuler is to be installed. - - If you want to install openEuler through the NFS, specify the path \(which is **cdrom** by default\) of installation source in the kickstart file. - - -**Installing the System** - -1. The installation selection dialog box is displayed. - 1. On the installation wizard page in [Starting the Installation](./installation-guideline.html#starting-the-installation), select **Install openEuler 21.03** and press **e**. - 2. Add **inst.ks=http://server ip/ks/openEuler-ks.cfg** to the startup parameters. - - ![](./figures/startparam.png) - - 3. Press **Ctrl**+**x** to start the automatic installation. - -2. Verify that the installation is complete. - - After the installation is complete, the system automatically boots. If the first boot option of the system is set to the CD_ROM, the installation page is displayed again. Shut down the computer and change startup option to start from the hard disk preferentially. - - ![](./figures/completing-the-automatic-installation.png) - - -## Full-automatic Installation Guide - -### Environment Requirements - -The environment requirements for full-automatic installation of openEuler using kickstart are as follows: - -- PM/VM \(for details about how to create VMs, see the documents from corresponding vendors\): includes the computer where kickstart is used for automatic installation and the computer where the kickstart tool is installed. -- Httpd: stores the kickstart file. -- TFTP: provides vmlinuz and initrd files. -- DHCPD/PXE: provides the DHCP service. 
-- ISO: openEuler-21.03-aarch64-dvd.iso - -### Procedure - -To use kickstart to perform full-automatic installation of openEuler, perform the following steps: - -**Environment Preparation** - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Before the installation, ensure that the firewall of the HTTP server is disabled. Run the following command to disable the firewall: ->``` ->iptables -F ->``` - -1. Install httpd and start the service. - - ``` - # dnf install httpd -y - # systemctl start httpd - # systemctl enable httpd - ``` - -2. Install and configure TFTP. - - ``` - # dnf install tftp-server -y - # vim /etc/xinetd.d/tftp - service tftp - { - socket_type = dgram - protocol = udp - wait = yes - user = root - server = /usr/sbin/in.tftpd - server_args = -s /var/lib/tftpboot - disable = no - per_source = 11 - cps = 100 2 - flags = IPv4 - } - # systemctl start tftp - # systemctl enable tftp - # systemctl start xinetd - # systemctl status xinetd - # systemctl enable xinetd - ``` - -3. Run the following commands to prepare the installation source: - - ``` - # mount openEuler-21.03-aarch64-dvd.iso /mnt - # cp -r /mnt/* /var/www/html/openEuler/ - ``` - -4. Set and modify the kickstart configuration file **openEuler-ks.cfg**. Select the HTTP installation source by referring to [3](#en-us_topic_0229291289_l1692f6b9284e493683ffa2ef804bc7ca). - - ``` - #vim /var/www/html/ks/openEuler-ks.cfg - ==================================== - ***Modify the following information as required.*** - #version=DEVEL - ignoredisk --only-use=sda - autopart --type=lvm - # Partition clearing information - clearpart --none --initlabel - # Use graphical install - graphical - # Keyboard layouts - keyboard --vckeymap=cn --xlayouts='cn' - # System language - lang zh_CN.UTF-8 - #Use http installation source - url --url=//192.168.122.1/openEuler/ - %post - #enable kdump - sed -i "s/ ro / ro crashkernel=1024M,high /" /boot/efi/EFI/openEuler/grub.cfg - %end - ... - ``` - -5. 
Modify the PXE configuration file **grub.cfg** as follows: - - ``` - # cp -r /mnt/images/pxeboot/* /var/lib/tftpboot/ - # cp /mnt/EFI/BOOT/grubaa64.efi /var/lib/tftpboot/ - # cp /mnt/EFI/BOOT/grub.cfg /var/lib/tftpboot/ - # ls /var/lib/tftpboot/ - grubaa64.efi grub.cfg initrd.img TRANS.TBL vmlinuz - # vim /var/lib/tftpboot/grub.cfg - set default="1" - - function load_video { - if [ x$feature_all_video_module = xy ]; then - insmod all_video - else - insmod efi_gop - insmod efi_uga - insmod ieee1275_fb - insmod vbe - insmod vga - insmod video_bochs - insmod video_cirrus - fi - } - - load_video - set gfxpayload=keep - insmod gzio - insmod part_gpt - insmod ext2 - - set timeout=60 - - - ### BEGIN /etc/grub.d/10_linux ### - menuentry 'Install openEuler 21.03 ' --class red --class gnu-linux --class gnu --class os { - set root=(tftp,192.168.1.1) - linux /vmlinuz ro inst.geoloc=0 console=ttyAMA0 console=tty0 rd.iscsi.waitnet=0 inst.ks=http://192.168.122.1/ks/openEuler-ks.cfg - initrd /initrd.img - } - ``` - -6. Run the following commands to configure DHCP \(which can be replaced by DNSmasq\): - - ``` - # dnf install dhcp -y - # - # DHCP Server Configuration file. - # see /usr/share/doc/dhcp-server/dhcpd.conf.example - # see dhcpd.conf(5) man page - # - # vim /etc/dhcp/dhcpd.conf - ddns-update-style interim; - ignore client-updates; - filename "grubaa64.efi"; # pxelinux location of the startup file; - next-server 192.168.122.1; # (IMPORTANT) TFTP server IP address; - subnet 192.168.122.0 netmask 255.255.255.0 { - option routers 192.168.111.1; # Gateway address - option subnet-mask 255.255.255.0; # Subnet mask - range dynamic-bootp 192.168.122.50 192.168.122.200; # Dynamic IP address range - default-lease-time 21600; - max-lease-time 43200; - } - # systemctl start dhcpd - # systemctl enable dhcpd - ``` - - -**Installing the System** - -1. On the **Start boot option** screen, press **F2** to boot from the PXE and start automatic installation. 
- - ![](./figures/en-us_image_0229291270.png) - - ![](./figures/en-us_image_0229291286.png) - - ![](./figures/en-us_image_0229291247.png) - -2. The automatic installation window is displayed. -3. Verify that the installation is complete. - - ![](./figures/completing-the-automatic-installation.png) - diff --git a/docs/en/docs/Quickstart/figures/Installation_wizard.png b/docs/en/docs/Quickstart/figures/Installation_wizard.png deleted file mode 100644 index 350936cd80af6661f39716dc0c9a49f281600c62..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/Installation_wizard.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/advanced-user-configuration.png b/docs/en/docs/Quickstart/figures/advanced-user-configuration.png deleted file mode 100644 index 59a188aece92ad19cc9b42f69e235d9a9d4f702a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/advanced-user-configuration.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/creating-a-user.png b/docs/en/docs/Quickstart/figures/creating-a-user.png deleted file mode 100644 index 0e2befb0832d1167f5ffdcafdf7d9952d9ccdfbe..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/creating-a-user.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/drive-icon.png b/docs/en/docs/Quickstart/figures/drive-icon.png deleted file mode 100644 index b41fcb09dfbf805da4863142855e7c2de4bf4c7b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/drive-icon.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/en-us_image_0229420473.png b/docs/en/docs/Quickstart/figures/en-us_image_0229420473.png deleted file mode 100644 index 86c61a4b8e2a5795baff2fc74629924d01d7b97b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/en-us_image_0229420473.png and /dev/null differ diff --git 
a/docs/en/docs/Quickstart/figures/image-dialog-box.png b/docs/en/docs/Quickstart/figures/image-dialog-box.png deleted file mode 100644 index caeb56bb46f766dd39d66a65e308c591954d32cf..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/image-dialog-box.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/installation-process.png b/docs/en/docs/Quickstart/figures/installation-process.png deleted file mode 100644 index 2d219c7605ee75e73dffba1e2dd7c277968d4801..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/installation-process.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/installation-summary.png b/docs/en/docs/Quickstart/figures/installation-summary.png deleted file mode 100644 index d5ca555a2b2291e139b67098a7c23d29b23b8b24..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/installation-summary.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/password-of-the-root-account.png b/docs/en/docs/Quickstart/figures/password-of-the-root-account.png deleted file mode 100644 index fe65e73a81e25e5fa90a13af707165911e7fa459..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/password-of-the-root-account.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/restart-icon.png b/docs/en/docs/Quickstart/figures/restart-icon.png deleted file mode 100644 index a1b02b2dff42c90845d2491192507ea6967352e3..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/restart-icon.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/selecting-a-language.png b/docs/en/docs/Quickstart/figures/selecting-a-language.png deleted file mode 100644 index 930bec7d3822a9e8289ee444a9fe6ee2dfdbad6c..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/selecting-a-language.png and /dev/null differ diff 
--git a/docs/en/docs/Quickstart/figures/selecting-installation-software.png b/docs/en/docs/Quickstart/figures/selecting-installation-software.png deleted file mode 100644 index c246e997d787d0d6a0439dcaf8780a09a9b72ca7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/selecting-installation-software.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/setting-the-boot-device.png b/docs/en/docs/Quickstart/figures/setting-the-boot-device.png deleted file mode 100644 index 42455bcd651b98a08b012b275d5f170daf07ac59..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/setting-the-boot-device.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/figures/setting-the-installation-destination.png b/docs/en/docs/Quickstart/figures/setting-the-installation-destination.png deleted file mode 100644 index 224f165b222598aa140187bdfa9b1e75af36c0c5..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/figures/setting-the-installation-destination.png and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-caution.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-danger.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-note.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-note.gif deleted file mode 100644 index 
6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-notice.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-tip.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/public_sys-resources/icon-warning.gif b/docs/en/docs/Quickstart/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Quickstart/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Quickstart/quick-start.md b/docs/en/docs/Quickstart/quick-start.md deleted file mode 100644 index 51eb4231f8fb832675bea3050294e82a822d573c..0000000000000000000000000000000000000000 --- a/docs/en/docs/Quickstart/quick-start.md +++ /dev/null @@ -1,355 +0,0 @@ -# Quick Start - -This document uses openEuler 21.03 installed on the TaiShan 200 server as an example to describe how to quickly install and use openEuler OS. For details about the installation requirements and methods, see [ Installation Guide](./../Installation/Installation.html). 
- - - -- [Quick Start](#quick-start) - - [Installation Preparations](#installation-preparations) - - [Obtaining the Installation Source](#obtaining-the-installation-source) - - [Release Package Integrity Check](#release-package-integrity-check) - - [Starting the Installation](#starting-the-installation) - - [Installation](#installation) - - [Viewing System Information](#viewing-system-information) - - -## Installation Preparations - -- Hardware Compatibility - - [Table 1](#table14948632047) describes the types of supported servers. - - **Table 1** Supported servers - - - - - - - - - - - - - - - - - -

Server Type

-

Server Name

-

Server Model

-

Rack server

-

TaiShan 200

-

2280 balanced model

-

Rack server

-

FusionServer Pro

-

FusionServer Pro 2288H V5

-
NOTE:

The server must be configured with the Avago SAS3508 RAID controller card and the LOM-X722 NIC.

-
-
- -- Minimum Hardware Specifications - - [Table 2](#tff48b99c9bf24b84bb602c53229e2541) lists the minimum hardware specifications supported by openEuler. - - **Table 2** Minimum hardware specifications - - - - - - - - - - - - - - - - - - - - - - - - - -

Component

-

Minimum Hardware Specifications

-

Description

-

Architecture

-
  • AArch64
  • x86_64
-
  • 64-bit Arm architecture
  • 64-bit Intel x86 architecture
-

CPU

-
  • Huawei Kunpeng 920 series
  • Intel ® Xeon® processor
-

-

-

Memory

-

≥ 4 GB (8 GB or higher recommended for better user experience)

-

-

-

Hard disk

-

≥ 120 GB (for better user experience)

-

The hard disk supports IDE, SATA, SAS interfaces.

-
- - -## Obtaining the Installation Source - -Perform the following operations to obtain the openEuler release package: - -> ![](./public_sys-resources/icon-note.gif) **NOTE:** -The release packages of the AArch64 architecture support the UEFI mode, and the release packages of the x86\_64 architecture support the UEFI and Legacy modes. - -1. Log in to the [openEuler Community](https://openeuler.org) website. - -2. Click **Download**. - -3. Click the link provided after **Download ISO**. The download list is displayed. - -4. Click **openEuler-21.03**. The openEuler 21.03 version download list is displayed. - -5. Click **ISO**. The ISO download list is displayed. - - - **aarch64**: ISO image file of the AArch64 architecture - - **x86\_64**: ISO image file of the x86\_64 architecture - - **source**: ISO image file of the openEuler source code - -6. Select the openEuler release package and verification file to be downloaded based on the architecture of the environment to be installed. - - - If the AArch64 architecture is used: - - 1. Click **aarch64**. - 2. Click **openEuler-21.03-aarch64-dvd.iso** to download the openEuler release package to the local host. - 3. Click **openEuler-21.03-aarch64-dvd.iso.sha256sum** to download the openEuler verification file to the local host. - - - If the x86\_64 architecture is used: - - 1. Click **x86\_64**. - 2. Click **openEuler-21.03-x86\_64-dvd.iso** to download the openEuler release package to the local host. - 3. Click **openEuler-21.03-x86\_64-dvd.iso.sha256sum** to download the openEuler verification file to the local host. - -## Release Package Integrity Check - -To prevent incomplete download of the software package due to network or storage device problems during the transmission, you can perform the following steps to check the integrity of the obtained openEuler software package: - -1. Obtain the verification value in the verification file. 
Run the following command: - - ``` - $cat openEuler-21.03-aarch64-dvd.iso.sha256sum - ``` - -2. Calculate the SHA256 verification value of the file. Run the following command: - - ``` - $sha256sum openEuler-21.03-aarch64-dvd.iso - ``` - - After the command is run, the verification value is displayed. - -3. Check whether the values calculated in step 1 and step 2 are consistent. - - If the verification values are consistent, the .iso file is not damaged. If they are inconsistent, you can confirm that the file is damaged and you need to obtain the file again. - -## Starting the Installation - -1. Log in to the iBMC WebUI. - - For details, see [TaiShan 200 Server User Guide (Model 2280)](https://support.huawei.com/enterprise/en/doc/EDOC1100093459). - -2. Choose **Configuration** from the main menu, and select **Boot Device** from the navigation tree. The **Boot Device** page is displayed. - - Set **Effective** and **Boot Medium** to **One-time** and **DVD-ROM**, respectively, and click **Save**, as shown in [Figure 1](#fig1011938131018). - - **Figure 1** Setting the boot device -![](./figures/setting-the-boot-device.png "setting-the-boot-device") - -3. Choose **Remote Console** from the main menu. The **Remote Console** page is displayed. - - Select an integrated remote console as required to access the remote virtual console, for example, **Java Integrated Remote Console (Shared)**. - -4. On the toolbar, click the icon shown in the following figure. - - **Figure 2** Drive icon -![](./figures/drive-icon.png "drive-icon") - - An image dialog box is displayed, as shown in the following figure. - - **Figure 3** Image dialog box -![](./figures/image-dialog-box.png "image-dialog-box") - -5. Select **Image File** and then click **Browse**. The **Open** dialog box is displayed. - -6. Select the image file and click **Open**. In the image dialog box, click **Connect**. If **Connect** changes to **Disconnect**, the virtual CD/DVD-ROM drive is connected to the server. - -7. 
On the toolbar, click the restart icon shown in the following figure to restart the device. - - **Figure 4** Restart icon -![](./figures/restart-icon.png "restart-icon") - -8. A boot menu is displayed after the system restarts, as shown in [Figure 5](#fig1648754873314). - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - If you do not perform any operations within 1 minute, the system automatically selects the default option **Test this media \& install openEuler 21.03** and enters the installation page. - > - During PM installation, if you cannot use the arrow keys to select boot options and the system does not respond after you press **Enter**, click ![](./figures/en-us_image_0229420473.png) on the BMC page and configure **Key \& Mouse Reset**. - - **Figure 5** Installation Wizard - ![](./figures/Installation_wizard.png "Installation_wizard") - -9. On the installation wizard page, press **Enter** to select the default option **Test this media \& install openEuler 21.03** to enter the GUI installation page. - -## Installation - -After entering the GUI installation page, perform the following operations to install the system: - -1. Set an installation language. The default language is English. You can change the language based on the site requirements, as shown in [Figure 6](#fig874344811484). - - **Figure 6** Selecting a language - ![](./figures/selecting-a-language.png "selecting-a-language") - -2. On the **INSTALLATION SUMMARY** page, set configuration items based on the site requirements. - - - A configuration item with an alarm symbol must be configured. When the alarm symbol disappears, you can perform the next operation. - - A configuration item without an alarm symbol is configured by default. - - You can click **Begin Installation** to install the system only when all alarms are cleared. - - **Figure 7** Installation summary - ![](./figures/installation-summary.png "installation-summary") - - 1. 
Select **Software Selection** to set configuration items. - - Based on the site requirements, select **Minimal Install** on the left box and select an add-on in the **Add-Ons for Selected Environment** area on the right, as shown in [Figure 8](#fig1133717611109). - - **Figure 8** Selecting installation software - ![](./figures/selecting-installation-software.png "selecting-installation-software") - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - In **Minimal Install** mode, not all packages in the installation source will be installed. If the required package is not installed, you can mount the installation source to the local PC and configure a repo source, and use DNF to install the package. - > - If you select **Virtual Host**, the virtualization components QEMU, libvirt, and edk2 are installed by default. You can select whether to install the OVS component in the add-on area. - - After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - - 2. Select **Installation Destination** to set configuration items. - - On the **INSTALLATION DESTINATION** page, select a local storage device. - - > ![](./public_sys-resources/icon-notice.gif) **NOTICE:** - > When selecting the device to be installed, you are advised not to use the NVMe SSD storage medium as the OS installation disk. - - You also need to configure the storage to partition the system. You can either manually configure partitions or select **Automatic** to automatically configure partitioning. Select **Automatic** if the software is installed in a new storage device or the data in the storage device is not required, as shown in [Figure 9](#fig153381468101). 
- - **Figure 9** Setting the installation destination - ![](./figures/setting-the-installation-destination.png "setting-the-installation-destination") - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - During partitioning, to ensure system security and performance, you are advised to divide the device into the following partitions: **/boot**, **/var**, **/var/log**, **/var/log/audit**, **/home**, and **/tmp**. - > - If the system is configured with the swap partition, the swap partition is used when the physical memory of the system is insufficient. Although the swap partition can be used to expand the physical memory, if the swap partition is used due to insufficient memory, the system response slows and the system performance deteriorates. Therefore, you are not advised to configure the swap partition in the system with sufficient physical memory or the performance sensitive system. - > - If you need to split a logical volume group, select **Custom** to manually partition the logical volume group. On the **MANUAL PARTITIONING** page, click **Modify** in the **Volume Group** area to reconfigure the logical volume group. - - After the setting is complete, click **Done** in the upper left corner to go back to the **INSTALLATION SUMMARY** page. - - 3. Select **Root Password** and set the root password. - - On the **ROOT PASSWORD** page, enter a password that meets the [Password Complexity](#密码复杂度) requirements and confirm the password, as shown in [Figure 10](#zh-cn_topic_0186390266_zh-cn_topic_0122145909_fig1323165793018). - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > - > - The root account is used to perform key system management tasks. You are not advised to use the root account for daily work or system access. - > - > - If you select **Lock root account** on the **Root Password** page, the root account will be disabled. 
- - **Password Complexity** - - The password of user root or a new user must meet the password complexity requirements. Otherwise, the password setting or user creation will fail. The password must meet the following requirements: - - 1. Contain at least eight characters. - - 2. Contain at least three of the following: uppercase letters, lowercase letters, digits, and special characters. - - 3. Different from the user name. - - 4. Not allowed to contain words in the dictionary. - - > ![](./public_sys-resources/icon-note.gif) **NOTE:** - > In the openEuler environment, you can run the `cracklib-unpacker /usr/share/cracklib/pw_dict > dictionary.txt` command to export the dictionary library file **dictionary.txt**. You can check whether the password is in this dictionary. - - **Figure 10** Root password - ![](./figures/password-of-the-root-account.png "Root password") - - After the settings are completed, click **Done** in the upper left corner to return to the **INSTALLATION SUMMARY** page. - - 4. Select **Create a User** and set the parameters. - - Figure 11](#zh-cn_topic_0186390266_zh-cn_topic_0122145909_fig1237715313319) shows the page for creating a user. Enter the user name and set the password. The password complexity must be the same as that of the root password. In addition, you can set the home directory and user group by clicking **Advanced**, as shown in [Figure 12](#zh-cn_topic_0186390266_zh-cn_topic_0122145909_fig1237715313319). - - **Figure 11** Creating a user - ![](./figures/creating-a-user.png "creating-a-user") - - **Figure 12** Advanced user configuration - ![](./figures/advanced-user-configuration.png "Advanced user configuration") - After the settings are completed, click **Done** in the upper left corner to return to the **INSTALLATION SUMMARY** page. - - 5. Set other configuration items. You can use the default values for other configuration items. - -3. 
Click **Start the Installation** to install the system, as shown in [Figure 13](#zh-cn_topic_0186390266_zh-cn_topic_0122145909_fig1237715313319). - - **Figure 13** Starting the installation - ![](./figures/installation-process.png "installation-process") -4. After the installation is completed, restart the system. - - openEuler has been installed. Click **Reboot** to restart the system. - -## Viewing System Information - -After the system is installed and restarted, the system CLI login page is displayed. Enter the username and password set during the installation to log in to openEuler OS and view the following system information. For details about system management and configuration, see the [openEuler 21.03 Administrator Guide](./../Administration/administration.html). - -- Run the following command to view the system information: - - ``` - cat /etc/os-release - ``` - - For example, the command and output are as follows: - - ``` - $ cat /etc/os-release - NAME="openEuler" - VERSION="21.03" - ID="openEuler" - VERSION_ID="21.03" - PRETTY_NAME="openEuler 21.03" - ANSI_COLOR="0;31" - ``` - -- View system resource information. - - Run the following command to view the CPU information: - - ``` - lscpu - ``` - - Run the following command to view the memory information: - - ``` - free - ``` - - Run the following command to view the disk information: - - ``` - fdisk -l - ``` - -- Run the following command to view the IP address: - - ``` - ip addr - ``` \ No newline at end of file diff --git a/docs/en/docs/Releasenotes/acknowledgement.md b/docs/en/docs/Releasenotes/acknowledgement.md deleted file mode 100644 index 47c049cdd996b1d7f590a023e095661e90599b30..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/acknowledgement.md +++ /dev/null @@ -1,4 +0,0 @@ -# Acknowledgement - -We sincerely thank all the members who participated in and assisted in the openEuler project. 
It is your hard work to make the version released successfully and provide the possibility for the better development of openEuler. - diff --git a/docs/en/docs/Releasenotes/common-vulnerabilities-and-exposures-(cve).md b/docs/en/docs/Releasenotes/common-vulnerabilities-and-exposures-(cve).md deleted file mode 100644 index eadc4c0a4ef5e520bd7666df52848975ff96247d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/common-vulnerabilities-and-exposures-(cve).md +++ /dev/null @@ -1,4 +0,0 @@ -# Common Vulnerabilities and Exposures \(CVE\) - -For CVE involved in the version, see the [CVE list](https://cve.openeuler.org/#/CVE). - diff --git a/docs/en/docs/Releasenotes/contribution.md b/docs/en/docs/Releasenotes/contribution.md deleted file mode 100644 index 5ac221e1384a2093fdbb6effa314b17f5f896add..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/contribution.md +++ /dev/null @@ -1,22 +0,0 @@ -# Contribution - -As an openEuler user, you can contribute to the openEuler community in multiple ways. For details about how to contribute to the community, see [How to Contribute](https://openeuler.org/en/community/contribution/). Here, some methods are listed for reference. - -## Special Interest Groups \(SIGs\) - -openEuler brings together people of common interest to form different special interest groups \(SIGs\). For details about existing SIGs, see the [SIG list](https://openeuler.org/en/sig/sig-list/). - -You are welcome to join an existing SIG or create a SIG. For details about how to create a SIG, see the [SIG Management Procedure](https://gitee.com/openeuler/community/blob/master/en/technical-committee/governance/README.md). 
- -## Mail List and Tasks - -You are welcome to actively help users solve problems raised in the [mail list](https://openeuler.org/en/community/mailing-list/) and issues \(including [code repository issues](https://gitee.com/organizations/openeuler/issues) and [software package repository issues](https://gitee.com/organizations/src-openeuler/issues)\). In addition, you can submit an issue. All these will help the openEuler community to develop better. - -## Documents - -You can contribute to the community by submitting code. We also welcome your feedback on problems and difficulties, or suggestions on improving the usability and integrity of documents. For example, problems in obtaining software or documents and difficulties in using the system. Welcome to pay attention to and improve the documentation module of the [openEuler community](https://openeuler.org/en/). - -## IRC - -openEuler has also opened a channel in IRC as an additional channel to provide community support and interaction. For details, see [openEuler IRC](https://gitee.com/openeuler/community/blob/master/en/communication/IRCs.md). - diff --git a/docs/en/docs/Releasenotes/installing-the-os.md b/docs/en/docs/Releasenotes/installing-the-os.md deleted file mode 100644 index 27cda92414b98895cb39dc8a402ce709bf0193c8..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/installing-the-os.md +++ /dev/null @@ -1,193 +0,0 @@ -# Installing the OS - -## Release Package - -The following table lists the [openEuler release files](http://repo.openeuler.org/openEuler-21.03/), including the ISO release package, container image, VM image, and repo source that is used online. - -**Table 1** openEuler release files - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DirectoryTypeDescription
ISOISO release packageThe directory differentiates the ISO release packages for the AArch64, x86, and source, respectively. The details are as follows: -
    -
  • openEuler-21.03-aarch64-dvd.iso and openEuler-21.03-x86_64-dvd.iso are the openEuler basic installation software package ISO files for the AArch64 and x86 architectures, respectively. The ISO files contain basic components running on the openEuler OS and meet the basic requirements of developers.
  • -
  • openEuler-21.03-everything-aarch64-dvd.iso and openEuler-21.03-everything-x86_64-dvd.iso are the openEuler full software package ISO files for the AArch64 and x86 architectures, respectively. In addition to all software in the openEuler basic installation software package, the ISO files also contain the software packages that have been verified in the openEuler community, which meet the advanced requirements of developers.
  • -
  • openEuler-21.03-debuginfo-aarch64-dvd.iso and openEuler-21.03-debuginfo-x86_64-dvd.iso are the openEuler debugging software package ISO files for the AArch64 and x86 architectures, respectively. The ISO files contain the symbol table information required for debugging and are used for debugging software functions and performance.
  • -
  • openEuler-21.03-source-dvd.iso is the ISO file that contains all source code software packages of the openEuler community, which is used offline by developers.
  • -
-
Note: -

Each ISO release package has its own verification file, which is used to verify the integrity of the ISO release package.

-
-
docker_imgContainer imageThe openEuler container image, which provides only the basic bash environment, is used as the basic container image. The directory differentiates the container images for the AArch64 and x86 architectures, respectively. -
Note: -

Each container image has its own verification file, which is used to verify the integrity of the container image.

-
-
virtual_machine_imgVM image

The openEuler VM image provides only the basic running environment to shorten the VM deployment time. The directory differentiates the VM images for the AArch64 and x86 architectures, respectively.

-
说明: -
-
    -
  • The default password of the root user of the VM image is openEuler12#$. Change the password upon the first login.
  • -
  • Each VM image has its own verification file, which is used to verify the integrity of the VM image.
  • -
-
-
-
EPOLRepo sourceThe repo sources of the openEuler third-party software package are mainly contributed by third parties and communities, and are maintained by the providers. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
OSThe repo source of the openEuler basic installation software package provides the offline version upgrade function. The content of the software package is the same as that of the basic installation software package ISO in the ISO release package. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
debuginfoThe repo source of the openEuler debugging software package provides the online download function. The content of the software package is the same as that of the debugging software package ISO in the ISO release package. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
everythingThe repo source of the openEuler full software package provides the online download and version upgrade functions. The content of the software package is the same as that of the full software package ISO in the ISO release package. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
extrasThe repo source of the openEuler extended software package is used to release new software packages with added features. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
sourceThe repo source of all source code software packages in the openEuler community, which is used online by developers.
updateThe repo source of the openEuler upgrade software package, which is used to fix bugs and common vulnerabilities and exposures (CVE) in released versions and to update and release software with enhancements. It provides online download and software upgrade functions. The directory differentiates the repo sources for the AArch64 and x86 architectures, respectively.
- - - -## Minimal Hardware Specifications - -[Table 5](#en-us_topic_0182825778_tff48b99c9bf24b84bb602c53229e2541) lists the minimum hardware specifications for installing openEuler 21.03. - -**Table 5** Minimal hardware specifications - - - - - - - - - - - - - - - - -

Component

-

Minimal Hardware Specification

-

CPU

-

Kunpeng 920 (architecture: AArch64)

-

x86-64 (Skylake or later)

-

Memory

-

≥ 8 GB

-

Hard disk

-

≥ 120 GB

-
- -## Hardware Compatibility - -[Table 6](#en-us_topic_0227922427_table39822012) lists the typical configurations of servers and components supported by openEuler. openEuler will gradually support other servers in the future. Partners and developers are welcome to participate in the contribution and validation. - -**Table 6** Supported servers and typical configurations - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Vendor

-

Server

-

Server Model

-

Component

-

Typical Configuration

-

Huawei

-

TaiShan 200

-

2280 balanced model

-

CPU

-

HiSilicon Kunpeng 920

-

Memory

-

32G\*4 2933MHz

-

RAID controller card

-

LSI SAS3508

-

Network

-

TM210

-

Huawei

-

FusionServer Pro

-

2288H V5 rack server

-

CPU

-

Intel(R) Xeon(R) Gold 5118 CPU @ 2.30GHz

-

Memory

-

32G*4 2400MHz

-

RAID controller card

-

LSI SAS3508

-

Network

-

X722

-
- diff --git a/docs/en/docs/Releasenotes/introduction.md b/docs/en/docs/Releasenotes/introduction.md deleted file mode 100644 index 7dca4bb25e938f9a2034a7469be9f1125c5a1bf2..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/introduction.md +++ /dev/null @@ -1,4 +0,0 @@ -# Introduction - -openEuler is an open-source operating system. The current openEuler kernel is based on Linux and supports Kunpeng and other processors. It fully unleashes the potential of computing chips. As an efficient, stable, and secure open-source OS built by global open-source contributors, openEuler applies to database, big data, cloud computing, and artificial intelligence \(AI\) scenarios. In addition, openEuler community is an open-source community for global OSs. Through community cooperation, openEuler builds an innovative platform, builds a unified and open OS that supports multiple processor architectures, and promotes the prosperity of the software and hardware application ecosystem. - diff --git a/docs/en/docs/Releasenotes/key-features.md b/docs/en/docs/Releasenotes/key-features.md deleted file mode 100644 index 53df363b8522c681741d1df7d0fdc41a6ce5383a..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/key-features.md +++ /dev/null @@ -1,49 +0,0 @@ -# Key Features - -- StratoVirt: Combines high security and performance with lightweight loads, low power consumption, and flexible component splitting for trusted virtualization platform in all scenarios. - - - Uses the Rust language, supports **seccomp** and multi-tenant isolation, providing a secure and trusted operating environment. - - Supports startup within 50 ms and memory noise floor of less than 4 MB, achieving the ultimate performance and lightweight deployment in various scenarios across-device-edge-cloud. - - Supports multiple hardware acceleration virtualized engines, such as x86 VT and Kunpeng-V. 
- - Supports device scaling within milliseconds, providing flexible resource scaling capabilities for lightweight loads. - - Scalable device models, supports complex device specifications such as PCI, and compatible with the QEMU software ecosystem. - - Supports multiple computing, network, and storage acceleration solutions, and flexible collaboration of heterogeneous computing power. - -- iSula: A lightweight container solution that unifies IoT, edge, and cloud computing. - - - Optimized operation performance for the startup and container lifecycle. - - **isula-build**, a container image build tool that provides secure and fast container image build capabilities. - - Secure and trusted VM startup for enhanced VM security. - -- Enhanced virtualization features - - - Optimizes VM lock preemption with dual-layer scheduling and Hypervisor-aware VM scheduling, delivering higher performance in the multi-core overcommitment scenario. - - Optimizes the IPI interruption performance using the Guest-Idle-Haltpoll mechanism, improving the database service performance. - - For the virtualization feature of the ARM platform, supports the CPU/memory hot plug and the custom mode for the KVM CPU, making resource configuration more flexible. - - Quickly collects performance indicators of a VM using the O\&M tool VMTOP. - - Enables hardlockup detection using the PMU NMI watchdog feature. - -- Kernel feature enhancement - - - Enhancement for IMA commercial use: Based on the open source IMA solution, improves security, performance, and usability to facilitate commercial use. - - NUMA Aware Qspinlock: Improves system performance by reducing cross-NUMA cache synchronization and ping-pong operations caused by lock competition. - - Ktask parallelism: A kernel task parallelism framework that supports the parallel operation of kernel tasks. - - MPAM resource control: Supports Cache QoS and memory bandwidth control technology for the ARM64 architecture. 
- - Memory system lock optimization: Optimizes vmalloc allocation lock and Pagecache lock. - -- Programming languages and compilers - - - JDK8 enhancement: Supports the APPCDS feature and crc32 hardware acceleration instruction. - - GCC optimization: Supports cyclic optimization, automatic vectorization, and global optimization. - -- Hardware and chip enablement - - - Raspberry Pi: Supports the Raspberry series boards. - -- Desktop support - - - UKUI: Default desktop environment of the Kylin OS. Its layout, style, and usage habits are similar to those of the traditional Windows OS. - -- Intelligent O\&M - - - A-Tune: An intelligent system performance optimization engine that infers service features and configures the optimal system parameter set for the optimal service operations. \ No newline at end of file diff --git a/docs/en/docs/Releasenotes/known-issues.md b/docs/en/docs/Releasenotes/known-issues.md deleted file mode 100644 index 16ada9132ac580ad434b59d9b922e73649597fbb..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/known-issues.md +++ /dev/null @@ -1,29 +0,0 @@ -# Known Issues - - - - - - - - - - - - - - - - -

Issue

-

Description

-

I1VR1W

-

An error message is displayed when the x86 QCOW2 image is used for VM creation or the ISO image is used for physical machine installation. The error message is output as expected. For details, see the issue response.

-

I1U1LP

-

The ARM-based physical machine uses the drive that has been written into the file system for customized partitioning, but the partitioning fails. A special path can be used to prevent this issue. For details, see the issue response.

-

I1VTC5

-

In the pressure test in the overcommitment scenario, frame freezing occurs during vmtop -H page turning when the number of vCPUs is greater than 1,000. The impact scope is controllable. For details, see the issue response.

-

I1WVM8

-

Among the CPU usage data collected by vmtop, a single core whose usage exceeds 100% exists. The impact scope is controllable. For details, see the issue response.

-
- diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-caution.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-danger.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-note.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-notice.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-tip.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/public_sys-resources/icon-warning.gif b/docs/en/docs/Releasenotes/public_sys-resources/icon-warning.gif deleted file mode 100644 index 
6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Releasenotes/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Releasenotes/release_notes.md b/docs/en/docs/Releasenotes/release_notes.md deleted file mode 100644 index 146d127b10ffcbf95e9018b67d3eb2de167ade40..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/release_notes.md +++ /dev/null @@ -1,3 +0,0 @@ -# Release Notes - -This document is the release notes for the openEuler 21.03 release version. \ No newline at end of file diff --git a/docs/en/docs/Releasenotes/resolved-issues.md b/docs/en/docs/Releasenotes/resolved-issues.md deleted file mode 100644 index 01107f9b33cb9d4ebeb2f23e7e52130dd9a42951..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/resolved-issues.md +++ /dev/null @@ -1,226 +0,0 @@ -# Resolved Issues - -For the complete issue list, see [Complete Issue List](https://gitee.com/organizations/src-openeuler/issues). - -For details about the complete kernel submission records, see [Record Submission](https://gitee.com/openeuler/kernel/commits/openEuler-1.0-LTS). - -**Applications and basic services** - - - - - - - - - - - - - - - - - - - - -

Issue

-

Description

-

I1TPY4

-

x86/arm mariadb-server installation fails.

-

I1TOV5

-

lm_sensors cannot be started by default, and systemd is in the degraded state.

-

I1TOCE

-

The kdump on an x86- or ARM-based physical machine fails to be started.

-

I1TZH1

-

The rule does not take effect when the destination port is 80 and the data packages from the source IP address of a host are added to the x86-based server.

-

I1T4O3

-

During x86 PXE installation, the %packages file in the .ks file is used to install minimal, @core, and @base. After the installation is successful, the startup is suspended.

-

I1T8JJ

-

The installation of the ARM-based freeRADIUS server fails.

-
- -**Programming languages and compilers ** - - - - - - - - - - -

Issue

-

Description

-

I1RUM6

-

Track the community to resolve the issue that the type of the file generated after the compilation of gcc -static-pie is incorrect.

-
- -**Virtualization and containers ** - - - - - - - - - - - - - - -

Issue

-

Description

-

I1TB7N

-

PMU nmi watchdog does not support CPU hot-plug.

-

I1TXAU

-

The VM fails to be started when the VM CPU mode is configured with host-model.

-

I1U8BP

-

When openEuler is used to set trusted boot as an image, the VM cannot identify the TPM device.

-
- -**Kernel** - - - - - - - - - - - - - - - - - - -

Issue

-

Description

-

I17YPQ

-

The drive connected to the LSI SAS3408 RAID controller card cannot be identified during the installation.

-

I1JZHT

-

The netdevsim module is repeatedly loaded and unloaded, causing the system to reset.

-

I1RUC8

-

The performance loss of vmalloc on x86-based servers is huge.

-

I1R86G

-

An error of pread is reported when openEuler performs a test on the open GaussDB, causing the return to -EIO.

-

I1SISM

-

The XFS drive with size=8192 cannot be mounted to the openEuler on the x86 platform.

-
- -**Security ** - - - - - - - - - - -

Issue

-

Description

-

I1TQ15

-

An error is reported when the firewall-cmd --reload command is executed to add an IP address set that does not exist to the drop area as the source.

-
- -**Hardware and chips** ****** - - - - - - - - - - - - -

Issue

-

Description

-

I1SY0K

-

The Raspberry Pi Wi-Fi is unavailable and the connection to the Wi-Fi network fails.

-

I1R4G1

-

The connection to the Raspberry Pi BT fails repeatedly.

-
- -**File system ** - - - - - - - - - - - - - - -

Issue

-

Description

-

I1E0KN

-

A core dump occurs when a user creates and deletes folders concurrently in the XFS file system and uses the find command to query the folders.

-

I1MA88

-

The libguestfs uses the Gnulib code that has a vulnerability, causing a core dump.

-

I1T3GC

-

The memory allocation is reported to fail when the mounted drive letter is checked at the NFS client.

-
- -**Network ** - - - - - - - - - - - - - - -

Issue

-

Description

-

I1TO3R

-

An error occurs when the network-manager-applet is invoked during installation and IPv4 is manually configured.

-

I1TYDG

-

The NetworkManager service cannot be self-healed due to the D-Bus service exception.

-

I1AD7N

-

The IP address is lost when the network is restarted because the return value of the grep -L command in the network-scripts file is changed.

-
- -**System tools ** - - - - - - - - - - - -

Issue

-

Description

-

I1U7RL

-

An error occurs during system-config-printer installation.

-

I1T8H4

-

After the x86/ARM-based device is installed by default, the systemctl --all --failed command is executed and the tuned service fails to be started.

-
- diff --git a/docs/en/docs/Releasenotes/source-code.md b/docs/en/docs/Releasenotes/source-code.md deleted file mode 100644 index e8e41391360885baba774fecef9a255a2c6ff987..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/source-code.md +++ /dev/null @@ -1,9 +0,0 @@ -# Source Code - -openEuler contains two code repositories: - -- Code repository: [https://gitee.com/openeuler](https://gitee.com/openeuler) -- Software package repository: [https://gitee.com/src-openeuler](https://gitee.com/src-openeuler) - -The openEuler release packages also provide the source ISO files. For details, see [Installing the OS](./installing-the-os.md). - diff --git a/docs/en/docs/Releasenotes/terms-of-use.md b/docs/en/docs/Releasenotes/terms-of-use.md deleted file mode 100644 index 1a7fbcac36f3cc1243592d9ce6a50f3704f9288f..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/terms-of-use.md +++ /dev/null @@ -1,13 +0,0 @@ -# Terms of Use - -**Copyright © 2020 openEuler Community** - -Your replication, use, modification, and distribution of this document are governed by the Creative Commons License Attribution-ShareAlike 4.0 International Public License \(CC BY-SA 4.0\). You can visit [https://creativecommons.org/licenses/by-sa/4.0/](https://creativecommons.org/licenses/by-sa/4.0/) to view a human-readable summary of \(and not a substitute for\) CC BY-SA 4.0. For the complete CC BY-SA 4.0, visit [https://creativecommons.org/licenses/by-sa/4.0/legalcode](https://creativecommons.org/licenses/by-sa/4.0/legalcode). - -**Trademarks and Permissions** - -openEuler is the trademark of the openEuler community. All other trademarks and registered trademarks mentioned in this document are the property of their respective holders. - -**Disclaimer** - -This document is used only as a guide. 
Unless otherwise specified by applicable laws or agreed by both parties in written form, all statements, information, and recommendations in this document are provided "AS IS" without warranties, guarantees or representations of any kind, including but not limited to non-infringement, timeliness, and specific purposes. diff --git a/docs/en/docs/Releasenotes/user-notice.md b/docs/en/docs/Releasenotes/user-notice.md deleted file mode 100644 index 65e12ded998bf253624df6e3802e58e015e92f46..0000000000000000000000000000000000000000 --- a/docs/en/docs/Releasenotes/user-notice.md +++ /dev/null @@ -1,5 +0,0 @@ -# User Notice - -- The version number counting rule of openEuler is changed from openEuler _x.x_ to openEuler _year_._month_. For example, openEuler 21.03 indicates that the version is released in March 2020. -- The [Python core team](https://www.python.org/dev/peps/pep-0373/#update) has stopped maintaining Python 2 in January 2020. In 2020, openEuler 21.03 fixes only the critical CVE of Python 2 and will reach the end of maintenance \(EOM\) on December 31, 2020. Please switch to Python 3 as soon as possible. 
- diff --git a/docs/en/docs/SecHarden/account-passwords.md b/docs/en/docs/SecHarden/account-passwords.md deleted file mode 100644 index 10b81b847bebbb6b7772fbb165a9311ccf8c6ea2..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/account-passwords.md +++ /dev/null @@ -1,335 +0,0 @@ -# Account Passwords - -- [Account Passwords](#account-passwords) - - [Shielding System Accounts](#shielding-system-accounts) - - [Restricting Permissions on the su Command](#restricting-permissions-on-the-su-command) - - [Setting Password Complexity](#setting-password-complexity) - - [Setting the Password Validity Period](#setting-the-password-validity-period) - - [Setting Password Encryption Algorithms](#setting-password-encryption-algorithms) - - [Locking an Account After Three Login Failures](#locking-an-account-after-three-login-failures) - - [Hardening the su Command](#hardening-the-su-command) - -## Shielding System Accounts - -### Description - -Accounts excluding user accounts are system accounts. System accounts cannot be used for logins or performing other operations. Therefore, system accounts must be shielded. - -### Implementation - -Modify the shell of a system account to **/sbin/nologin**. - -``` -usermod -L -s /sbin/nologin $systemaccount -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->_$systemaccount_ indicates the system account. - -## Restricting Permissions on the su Command - -### Description - -The **su** command is used to switch user accounts. To improve system security, only the user **root** and users in the wheel group can use the **su** command. - -### Implementation - -Modify the **/etc/pam.d/su** file as follows: - -``` -auth required pam_wheel.so use_uid -``` - -   - -**Table 1** Configuration item in pam\_wheel.so - - - - - - - - - - -

Item

-

Description

-

use_uid

-

Performs the wheel group check based on the real UID of the current account instead of the login name.

-
- -## Setting Password Complexity - -### Description - -You can set the password complexity requirements by modifying the corresponding configuration file. You are advised to set the password complexity based on the site requirements. - -### Implementation - -The password complexity is implemented by the **pam\_pwquality.so** and **pam\_pwhistory.so** modules in the **/etc/pam.d/password-auth** and **/etc/pam.d/system-auth** files. You can modify the configuration items of the two modules to change the password complexity requirements. - -### Example - -This section provides an example for configuring password complexity. - -**Password Complexity Requirements** - -1. Contains at least eight characters. -2. Contains at least three types of the following characters: - - - At least one lowercase letter - - - At least one uppercase letter - - - At least one digit - - - At least one space or one of the following special characters: \` \~ ! @ \# $ % ^ & \* \( \) - \_ = + \\ | \[ \{ \} \] ; : ' " , < . \> / ? - -3. Cannot be the same as an account or the account in reverse order. -4. Cannot be the last five passwords used. - -**Implementation** - -Add the following content to the **/etc/pam.d/password-auth** and **/etc/pam.d/system-auth** files: - -``` -password requisite pam_pwquality.so minlen=8 minclass=3 enforce_for_root try_first_pass local_users_only retry=3 dcredit=0 ucredit=0 lcredit=0 ocredit=0 -password required pam_pwhistory.so use_authtok remember=5 enforce_for_root -``` - -   - -**Configuration Item Description** - -For details about the configuration items of **pam\_pwquality.so** and **pam\_pwhistory.so**, see [Table 1](#table201221044172117) and [Table 2](#table1212544452120), respectively. - -**Table 1** Configuration items in pam\_pwquality.so - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

minlen=8

-

A password must contain at least eight characters.

-

minclass=3

-

A password must contain at least three of the following types: uppercase letters, lowercase letters, digits, and special characters.

-

ucredit=0

-

A password contains any number of uppercase letters.

-

lcredit=0

-

A password contains any number of lowercase letters.

-

dcredit=0

-

A password contains any number of digits.

-

ocredit=0

-

A password contains any number of special characters.

-

retry=3

-

A maximum of three attempts is allowed each time a password is changed.

-

enforce_for_root

-

This configuration is also effective for user root.

-
- -**Table 2** Configuration items in pam\_pwhistory.so - - - - - - - - - - - - - -

Item

-

Description

-

remember=5

-

A password must be different from the last five passwords used.

-

enforce_for_root

-

This configuration is also effective for user root.

-
- -## Setting the Password Validity Period - -### Description - -To ensure system security, you are advised to set the password validity period and notify users to change passwords before the passwords expire. - -### Implementation - -The password validity period is set by modifying the **/etc/login.defs** file. [Table 1](#en-us_topic_0152100281_t77b5d0753721450c81911c18b74e82eb) describes the hardening items. All hardening items in the table are in the **/etc/login.defs** file. You can directly modify the items in the configuration file. - -**Table 1** Configuration items in login.defs - - - - - - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

Suggestion

-

Configured as Suggested

-

PASS_MAX_DAYS

-

Maximum validity period of a password.

-

90

-

No

-

PASS_MIN_DAYS

-

Minimum interval between password changes.

-

0

-

No

-

PASS_WARN_AGE

-

Number of days before the password expires.

-

7

-

No

-
- ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The **login.defs** file is used to set restrictions on user accounts, such as setting the maximum password validity period and maximum length. The configuration in this file is invalid for the user **root**. If the **/etc/shadow** file contains the same items, the **/etc/shadow** configuration takes precedence over the **/etc/login.defs** configuration. When a user attempts to log in after the password expires, the user will be informed of the password expiry and is required to change the password. If the user does not change the password, the user cannot access the system. - -## Setting Password Encryption Algorithms - -### Description - -For system security, passwords cannot be stored in plaintext in the system and must be encrypted. The passwords that do not need to be restored must be encrypted using irreversible algorithms. Set the password encryption algorithm to SHA-512. This item has been set by default in openEuler. The preceding settings can effectively prevent password disclosure and ensure password security. - -### Implementation - -To set the password encryption algorithm, add the following configuration to the **/etc/pam.d/password-auth** and **/etc/pam.d/system-auth** files: - -``` -password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok -``` - -   - -**Table 1** Configuration items in pam\_unix.so - - - - - - - - - - -

Item

-

Description

-

sha512

-

The SHA-512 algorithm is used for password encryption.

-
- -## Locking an Account After Three Login Failures - -### Description - -To ensure user system security, you are advised to set the maximum number of incorrect password attempts \(three attempts are recommended\) and the automatic unlocking time \(300 seconds are recommended\) for a locked account. - -If an account is locked, any input is invalid but does not cause the locking timer to recount. Records of the user's invalid inputs are cleared once unlocked. The preceding settings protect passwords from being forcibly cracked and improve system security. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->By default, the maximum number of incorrect password attempts is 3 in openEuler. After the system is locked, the automatic unlock time is 60 seconds. - -### Implementation - -The password complexity is set by modifying the **/etc/pam.d/password-auth** and **/etc/pam.d/system-auth** files. The maximum number of incorrect password attempts is set to **3**, and the unlocking time after the system is locked is set to **300** seconds. The configuration is as follows: - -``` -auth required pam_faillock.so preauth audit deny=3 even_deny_root unlock_time=300 -auth [default=die] pam_faillock.so authfail audit deny=3 even_deny_root unlock_time=300 -auth sufficient pam_faillock.so authsucc audit deny=3 even_deny_root unlock_time=300 -``` - -**Table 1** Configuration items in pam\_faillock.so - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

authfail

-

Captures account login failure events.

-

deny=3

-

A user account will be locked after three login attempts.

-

unlock_time=300

-

A locked common user account is automatically unlocked in 300 seconds.

-

even_deny_root

-

This configuration is also effective for user root.

-
- -## Hardening the su Command - -### Description - -To enhance system security and prevent the environment variables of the current user from being brought into other environments when you run the **su** command to switch to another user, this item has been configured by default in openEuler. The **PATH** variable is always initialized when the **su** command is used to switch users. - -### Implementation - -Modify the **/etc/login.defs** file. The configuration is as follows: - -``` -ALWAYS_SET_PATH=yes -``` diff --git a/docs/en/docs/SecHarden/appendix.md b/docs/en/docs/SecHarden/appendix.md deleted file mode 100644 index 2c47d84fc9055ad6390ee0eb7e63cd76f9b6eff3..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/appendix.md +++ /dev/null @@ -1,36 +0,0 @@ -# Appendix - -This chapter describes the file permissions and **umask** values. - -- [Appendix](#appendix.md) - - [Permissions on Files and Directories](#permissions-on-files-and-directories) - - [umask Values](#umask-values) - - - - - -## Permissions on Files and Directories - -Permission on files and directories in Linux specifies the users who can access and perform operations on files and directories and the access and operation modes. Permissions on files and directories include read only, write only, and execute. - -The following types of users can access files and directories: - -- File creator -- Users in the same group as a file creator -- Users not in the same group as a file creator - -An example of permission on files and directories is described as follows: - -If the permission on **/usr/src** is set to **755** which is 111101101 in binary mode, permissions for each type of users are described as follows: - -- The left-most **111** indicates that the file owner can read, write, and execute the file. -- The middle **101** indicates the group users can read and execute but cannot write the file. 
-- The right-most **101** indicates that other users can read and execute but cannot write the file. - -## umask Values - -When a user creates a file or directory, the file or directory has a default permission. The default permission is specified by the **umask** value. - -The **umask** value is the complement of the permission value. The actual permission value is obtained by subtracting the **umask** value from the default maximum permission value. The default maximum permission of a file is readable and writable. The default maximum permission of a directory is readable, writable, and executable. The default permission of a file is 666 minus the **umask** value. The default permission of a directory is 777 minus the **umask** value. - diff --git a/docs/en/docs/SecHarden/authentication-and-authorization.md b/docs/en/docs/SecHarden/authentication-and-authorization.md deleted file mode 100644 index e12f2bae5575b2295f2fe704b2dfc60856cf4b35..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/authentication-and-authorization.md +++ /dev/null @@ -1,156 +0,0 @@ -# Authentication and Authorization - -- [Authentication and Authorization](#authentication-and-authorization) - - [Setting a Warning for Remote Network Access](#setting-a-warning-for-remote-network-access) - - [Forestalling Unauthorized System Restart by Holding Down Ctrl, Alt, and Delete](#forestalling-unauthorized-system-restart-by-holding-down-ctrl-alt-and-delete) - - [Setting an Automatic Exit Interval for Shell](#setting-an-automatic-exit-interval-for-shell) - - [Setting the Default umask Value for Users to 0077](#setting-the-default-umask-value-for-users-to-0077) - - [Setting the GRUB2 Encryption Password](#setting-the-grub2-encryption-password) - - [Setting the Secure Single-user Mode](#setting-the-secure-single-user-mode) - - [Disabling Interactive Startup](#disabling-interactive-startup) - - - -## Setting a Warning for Remote Network Access - -### Description - -A warning for remote 
network access is configured and displayed for users who attempt to remotely log in to the system. The warning indicates the penalty for authorized access and is used to threaten potential attackers. When the warning is displayed, system architecture and other system information are hidden to protect the system from being attacked. - -### Implementation - -This setting can be implemented by modifying the **/etc/issue.net** file. Replace the original content in the **/etc/issue.net** file with the following information \(which has been set by default in openEuler\): - -``` -Authorized users only. All activities may be monitored and reported. -``` - -## Forestalling Unauthorized System Restart by Holding Down Ctrl, Alt, and Delete - -### Description - -By default, you can restart the OS by holding down **Ctrl**, **Alt**, and **Delete**. It is advised to disable this feature to prevent data loss caused by misoperations. - -### Implementation - -To disable the feature of restarting the system by holding down **Ctrl**, **Alt**, and **Delete**, perform the following steps: - -1. Run the following commands to delete the two **ctrl-alt-del.target** files: - - ``` - rm -f /etc/systemd/system/ctrl-alt-del.target - rm -f /usr/lib/systemd/system/ctrl-alt-del.target - ``` - -2. Change **\#CtrlAltDelBurstAction=reboot-force** to **CtrlAltDelBurstAction=none** in the **/etc/systemd/system.conf** file. -3. Run the following command to restart systemd for the modification to take effect: - - ``` - systemctl daemon-reexec - ``` - -## Setting an Automatic Exit Interval for Shell - -### Description - -An unattended shell is prone to listening or attacks. Therefore, it is advised that a mechanism be configured to ensure that a shell can automatically exit when it does not run for a period. 
- -### Implementation - -At the end of file **/etc/profile**, set the **TMOUT** field \(unit: second\) that specifies the interval for automatic exit as follows: - -``` -export TMOUT=300 -``` - -## Setting the Default umask Value for Users to 0077 - -### Description - -The **umask** value is used to set default permission on files and directories. A smaller **umask** value indicates that group users or other users have incorrect permission, which brings system security risks. Therefore, the default **umask** value must be set to **0077** for all users, that is, the default permission on user directories is **700** and the permission on user files is **600**. The **umask** value indicates the complement of a permission. For details about how to convert the **umask** value to a permission, see [umask Values](#umask-values.md). - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->By default, the **umask** value of the openEuler user is set to **0077**. - -### Implementation - -1. Add **umask 0077** to the **/etc/bashrc** file and all files in the **/etc/profile.d/** directory. - - ``` - echo "umask 0077" >> $FILE - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >_$FILE_ indicates the file name, for example, echo "umask 0077" \>\> /etc/bashrc. - -2. Set the ownership and group of the **/etc/bashrc** file and all files in the **/etc/profile.d/** directory to **root**. - - ``` - chown root.root $FILE - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >_$FILE_ indicates the file name, for example, **chown root.root /etc/bashrc**. - - -## Setting the GRUB2 Encryption Password - -### Description - -GRand Unified Bootloader \(GRUB\) is an operating system boot manager used to boot different systems \(such as Windows and Linux\). GRUB2 is an upgraded version of GRUB. - -When starting the system, you can modify the startup parameters of the system on the GRUB2 screen. 
To ensure that the system startup parameters are not modified randomly, you need to encrypt the GRUB2 screen. The startup parameters can be modified only when the correct GRUB2 password is entered. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The default password of GRUB2 is **openEuler\#12**. You are advised to change the default password upon the first login and periodically update the password. If the password is leaked, startup item configurations may be modified, causing the system startup failure. - -### Implementation - -1. Run the **grub2-mkpasswd-pbkdf2** command to generate an encrypted password. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >SHA-512 is used as the GRUB2 encryption algorithm. - - ``` - # grub2-mkpasswd-pbkdf2 - Enter password: - Reenter password: - PBKDF2 hash of your password is - grub.pbkdf2.sha512.10000.5A45748D892672FDA02DD3B6F7AE390AC6E6D532A600D4AC477D25C7D087644697D8A0894DFED9D86DC2A27F4E01D925C46417A225FC099C12DBD3D7D49A7425.2BD2F5BF4907DCC389CC5D165DB85CC3E2C94C8F9A30B01DACAA9CD552B731BA1DD3B7CC2C765704D55B8CD962D2AEF19A753CBE9B8464E2B1EB39A3BB4EAB08 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Enter the same password in the **Enter password** and **Reenter password** lines. - >After **openEuler\#12** is encrypted by **grub2-mkpasswd-pbkdf2**, the output is **grub.pbkdf2.sha512.10000.5A45748D892672FDA02DD3B6F7AE390AC6E6D532A600D4AC477D25C7D087644697D8A0894DFED9D86DC2A27F4E01D925C46417A225FC099C12DBD3D7D49A7425.2BD2F5BF4907DCC389CC5D165DB85CC3E2C94C8F9A30B01DACAA9CD552B731BA1DD3B7CC2C765704D55B8CD962D2AEF19A753CBE9B8464E2B1EB39A3BB4EAB08**. The ciphertext is different each time. - -2. Open **/boot/efi/EFI/openEuler/grub.cfg** in a vi editor. Append the following fields to the beginning of **/boot/efi/EFI/openEuler/grub.cfg**. 
- - ``` - set superusers="root" - password_pbkdf2 root grub.pbkdf2.sha512.10000.5A45748D892672FDA02DD3B6F7AE390AC6E6D532A600D4AC477D25C7D087644697D8A0894DFED9D86DC2A27F4E01D925C46417A225FC099C12DBD3D7D49A7425.2BD2F5BF4907DCC389CC5D165DB85CC3E2C94C8F9A30B01DACAA9CD552B731BA1DD3B7CC2C765704D55B8CD962D2AEF19A753CBE9B8464E2B1EB39A3BB4EAB08 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- The **superusers** field is used to set the account name of the super GRUB2 administrator. - >- The first parameter following the **password\_pbkdf2** field is the GRUB2 account name, and the second parameter is the encrypted password of the account. - - -## Setting the Secure Single-user Mode - -### Description - -When you log in to the system as user **root** in single-user mode, if the **root** password is not set, high security risks exist. - -### Implementation - -This setting can be implemented by modifying the **/etc/sysconfig/init** file. Set **SINGLE** to **SINGLE=/sbin/sulogin**. - -## Disabling Interactive Startup - -### Description - -With interactive guidance, console users can disable audit, firewall, or other services, which compromises system security. Users can disable interactive startup to improve security. This item is disabled by default in openEuler. - -### Implementation - -This setting can be implemented by modifying the **/etc/sysconfig/init** file. Set **PROMPT** to **no**. 
diff --git a/docs/en/docs/SecHarden/figures/en-us_image_0221925211.png b/docs/en/docs/SecHarden/figures/en-us_image_0221925211.png deleted file mode 100644 index 62ef0decdf6f1e591059904001d712a54f727e68..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/figures/en-us_image_0221925211.png and /dev/null differ diff --git a/docs/en/docs/SecHarden/figures/en-us_image_0221925212.png b/docs/en/docs/SecHarden/figures/en-us_image_0221925212.png deleted file mode 100644 index ad5ed3f7beeb01e6a48707c4806606b41d687e22..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/figures/en-us_image_0221925212.png and /dev/null differ diff --git a/docs/en/docs/SecHarden/file-permissions.md b/docs/en/docs/SecHarden/file-permissions.md deleted file mode 100644 index 23f7dbefaabb9ba8e7cd7847efb7e857dc27ebe1..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/file-permissions.md +++ /dev/null @@ -1,241 +0,0 @@ - -## File Permissions - -- [File Permissions](#file-permissions) - - [Setting the Permissions on and Ownership of Files](#setting-the-permissions-on-and-ownership-of-files) - - [Deleting Unowned Files](#deleting-unowned-files) - - [Removing a Symbolic Link to /dev/null](#removing-a-symbolic-link-to-dev-null) - - [Setting the umask Value for a Daemon](#setting-the-umask-value-for-a-daemon) - - [Adding a Sticky Bit Attribute to Globally Writable Directories](#adding-a-sticky-bit-attribute-to-globally-writable-directories) - - [Disabling the Globally Writable Permission on Unauthorized Files](#disabling-the-globally-writable-permission-on-unauthorized-files) - - [Restricting Permissions on the at Command](#restricting-permissions-on-the-at-command) - - [Restricting Permissions on the cron Command](#restricting-permissions-on-the-cron-command) - - [Restricting Permissions on the sudo Command](#restricting-permissions-on-the-sudo-command) - - -## Setting the Permissions on and Ownership of Files - -### Description 
- -In Linux, all objects are processed as files. Even a directory will be processed as a large file containing many files. Therefore, the most important thing in Linux is the security of files and directories. Their security is ensured by permissions and owners. - -By default, the permissions and ownership of common directories, executable files, and configuration files in the system are set in openEuler. - -### Implementation - -The following uses the **/bin** directory as an example to describe how to change the permission and ownership of a file: - -- Modify the file permission. For example, set the permission on the **/bin** directory to **755**. - - ``` - chmod 755 /bin - ``` - -- Change the ownership of the file. For example, set the ownership and group of the **/bin** directory to **root:root**. - - ``` - chown root:root /bin - ``` - - -## Deleting Unowned Files - -### Description - -When deleting a user or group, the system administrator may forget to delete the files of the user or group. If the name of a new user or group is the same as that of the deleted user or group, the new user or group will own files on which it has no permission. You are advised to delete these files. - -### Implementation - -Delete the file whose user ID does not exist. - -1. Search for the file whose user ID does not exist. - - ``` - find / -nouser - ``` - -2. Delete the found file. In the preceding command, _filename_ indicates the name of the file whose user ID does not exist. - - ``` - rm -f filename - ``` - - -Delete the file whose group ID does not exist. - -1. Search for the file whose user ID does not exist. - - ``` - find / -nogroup - ``` - -2. Delete the found file. In the preceding command, _filename_ indicates the name of the file whose user ID does not exist. - - ``` - rm -f filename - ``` - - -## Removing a Symbolic Link to /dev/null - -### Description - -A symbolic link to **/dev/null** may be used by malicious users. This affects system security. 
You are advised to delete these symbolic links to improve system security. - -### Special Scenario - -After openEuler is installed, symbolic links to **/dev/null** may exist. These links may have corresponding functions. \(Some of them are preconfigured and may be depended by other components.\) Rectify the fault based on the site requirements. For details, see [Implementation](#en-us_topic_0152100319_s1b24647cdd834a8eaca3032611baf072). - -For example, openEuler supports UEFI and legacy BIOS installation modes. The GRUB packages supported in the two boot scenarios are installed by default. If you select the legacy BIOS installation mode, a symbolic link **/etc/grub2-efi.cfg** is generated. If you select the UEFI installation mode, a symbolic link **/etc/grub2.cfg** is generated. You need to process these symbolic links based on the site requirements. - -### Implementation - -1. Run the following command to search for symbolic links to **/dev/null**: - - ``` - find dirname -type l -follow 2>/dev/null - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >_dir__name_ indicates the directory to be searched. Normally, key system directories, such as **/bin**, **/boot**, **/usr**, **/lib64**, **/lib**, and **/var**, need to be searched. - -2. If these symbolic links are useless, run the following command to delete them: - - ``` - rm -f filename - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >_filename_ indicates the file name obtained in [Step 1](#en-us_topic_0152100319_l4dc74664c4fb400aaf91fb314c4f9da6). - - -## Setting the umask Value for a Daemon - -### Description - -The **umask** value is used to set default permission on files and directories. If the **umask** value is not specified, the file has the globally writable permission. This brings risks. A daemon provides a service for the system to receive user requests or network customer requests. 
To improve the security of files and directories created by the daemon, you are advised to set **umask** to **0027**. The **umask** value indicates the complement of a permission. For details about how to convert the **umask** value to a permission, see [umask Values](#umask-values.md). - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->By default, the **umask** value of the daemon is set to **0022** in openEuler. - -### Implementation - -In configuration file **/etc/sysconfig/init**, add **umask 0027** as a new row. - -## Adding a Sticky Bit Attribute to Globally Writable Directories - -### Description - -Any user can delete or modify a file or directory in a globally writable directory, which leads to unauthorized file or directory deletion. Therefore, the sticky bit attribute is required for globally writable directories. - -### Implementation - -1. Search for globally writable directories. - - ``` - find / -type d -perm -0002 ! -perm -1000 -ls | grep -v proc - ``` - -2. Add the sticky bit attribute to globally writable directories. _dirname_ indicates the name of the directory that is found. - - ``` - chmod +t dirname - ``` - - -## Disabling the Globally Writable Permission on Unauthorized Files - -### Description - -Any user can modify globally writable files, which affects system integrity. - -### Implementation - -1. Search for all globally writable files. - - ``` - find / -type d \( -perm -o+w \) | grep -v procfind / -type f \( -perm -o+w \) | grep -v proc - ``` - -2. View the settings of files \(excluding files and directories with sticky bits\) listed in step 1, and delete the files or disable the globally writable permission on them. Run the following command to remove the permission. In the command, _filename_ indicates the file name. - -    - - ``` - chmod o-w filename - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >You can run the following command to check whether the sticky bit is set for the file or directory. 
If the command output contains the **T** flag, the file or directory is with a sticky bit. In the command, _filename_ indicates the name of the file or directory to be queried. - >``` - >ls -l filename - >``` - - -## Restricting Permissions on the at Command - -### Description - -The **at** command is used to create a scheduled task. Users who can run the **at** command must be specified to protect the system from being attacked. - -### Implementation - -1. Delete the **/etc/at.deny** file. - - ``` - rm -f /etc/at.deny - ``` - -2. Run the following command to change the ownership of file **/etc/at.allow** file to **root:root**. - - ``` - chown root:root /etc/at.allow - ``` - -3. Set that only user **root** can operate file **/etc/at.allow**. - - ``` - chmod og-rwx /etc/at.allow - ``` - - -## Restricting Permissions on the cron Command - -### Description - -The **cron** command is used to create a routine task. Users who can run the **cron** command must be specified to protect the system from being attacked. - -### Implementation - -1. Delete the **/etc/cron.deny** file. - - ``` - rm -f /etc/at.deny - ``` - -2. Run the following command to change the ownership of the **/etc/cron.allow** file to **root:root**: - - ``` - chown root:root /etc/cron.allow - ``` - -3. Set that only user **root** can operate file **/etc/cron.allow**. - - ``` - chmod og-rwx /etc/cron.allow - ``` - - -## Restricting Permissions on the sudo Command - -### Description - -A common user can use the **sudo** command to run commands as the user **root**. To harden system security, it is necessary to restrict permissions on the **sudo** command. Only user **root** can use the **sudo** command. By default, openEuler does not retrict the permission of non-root users to run the sudo command. - -### Implementation - -Modify the **/etc/sudoers** file to restrict permissions on the **sudo** command. 
Comment out the following configuration line: - -``` -#%wheel ALL=(ALL) ALL -``` diff --git a/docs/en/docs/SecHarden/kernel-parameters.md b/docs/en/docs/SecHarden/kernel-parameters.md deleted file mode 100644 index b193426b3b65a5e4bcad99a471cc2c82c2d46cc8..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/kernel-parameters.md +++ /dev/null @@ -1,229 +0,0 @@ -# Kernel Parameters - -- [Kernel Parameters](#kernel-parameters) - - [Hardening the Security of Kernel Parameters](#hardening-the-security-of-kernel-parameters) - - -## Hardening the Security of Kernel Parameters - -### Description - -Kernel parameters specify the status of network configurations and application privileges. The kernel provides system control which can be fine-tuned or configured by users. This function can improve the security of the OS by controlling configurable kernel parameters. For example, you can fine-tune or configure network options to improve system security. - -### Implementation - -1. Write the hardening items in [Table 1](#en-us_topic_0152100187_t69b5423c26644b26abe94d88d38878eb) to the **/etc/sysctl.conf** file. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Record security hardening items as follows: - >``` - >net.ipv4.icmp_echo_ignore_broadcasts = 1 - >net.ipv4.conf.all.rp_filter = 1 - >net.ipv4.conf.default.rp_filter = 1 - >``` - - **Table 1** Policies for hardening the security of kernel parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

Suggestion

-

Configured as Suggested

-

net.ipv4.icmp_echo_ignore_broadcasts

-

Specifies whether ICMP broadcast packets are accepted. They are not accepted according to the hardening policy.

-

1

-

Yes

-

net.ipv4.conf.all.rp_filter

-

Specifies whether the actual source IP address used by a data packet is related to a routing table and whether the data packet receives responses through interfaces. The item is enabled according to the hardening policy.

-

1

-

Yes

-

net.ipv4.conf.default.rp_filter

-

1

-

Yes

-

net.ipv4.ip_forward

-

The IP forwarding function prevents unauthorized IP address packets from being transferred to a network. The item is disabled according to the hardening policy.

-

0

-

Yes

-

net.ipv4.conf.all.accept_source_route

-

accept_source_route indicates that a packet sender can specify a path for sending the packet and a path for receiving a response. The item is disabled according to the hardening policy.

-

0

-

Yes

-

net.ipv4.conf.default.accept_source_route

-

0

-

Yes

-

net.ipv4.conf.all.accept_redirects

-

Specifies whether a redirected ICMP packet is sent. The packet is not sent according to the hardening policy.

-

0

-

Yes

-

net.ipv4.conf.default.accept_redirects

-

0

-

Yes

-

net.ipv6.conf.all.accept_redirects

-

0

-

Yes

-

net.ipv6.conf.default.accept_redirects

-

0

-

Yes

-

net.ipv4.conf.all.send_redirects

-

Specifies whether a redirected ICMP packet is sent to another server. This item is enabled only when the host functions as a route. The item is disabled according to the hardening policy.

-

0

-

Yes

-

net.ipv4.conf.default.send_redirects

-

0

-

Yes

-

net.ipv4.icmp_ignore_bogus_error_responses

-

Fake ICMP packets are not recorded to logs, which saves disk space. The item is enabled according to the hardening policy.

-

1

-

Yes

-

net.ipv4.tcp_syncookies

-

SYN attack is a DoS attack that forces system restart by occupying system resources. TCP-SYN cookie protection is enabled according to the hardening policy.

-

1

-

Yes

-

kernel.dmesg_restrict

-

Hardens dmesg messages. Only the administrator is allowed to view the messages.

-

1

-

Yes

-

kernel.sched_autogroup_enabled

-

Determines whether the kernel automatically groups and schedules threads. After this item is enabled, scheduling groups compete for time slices, and threads in a scheduling group compete for the time slices allocated to the scheduling group. The item is disabled according to the hardening policy.

-

0

-

No

-

kernel.sysrq

-

Disables the magic key.

-
NOTE:

You are advised to disable the magic key so that commands cannot be directly passed to the kernel.

-
-

0

-

Yes

-

net.ipv4.conf.all.secure_redirects

-

Specifies whether redirected ICMP messages sent from any servers or from gateways listed in the default gateway list are accepted. Redirected ICMP messages are received from any servers according to the hardening policy.

-

0

-

Yes

-

net.ipv4.conf.default.secure_redirects

-

0

-

Yes

-
- -2. Run the following command to load the kernel parameters set in the **sysctl.conf** file: - - ``` - sysctl -p /etc/sysctl.conf - ``` - - -### Other Security Suggestions - -- **net.ipv4.icmp\_echo\_ignore\_all**: ignores ICMP requests. - - For security purposes, you are advised to enable this item. The default value is **0**. Set the value to **1** to enable this item. - - After this item is enabled, all incoming ICMP Echo request packets will be ignored, which will cause failure to ping the target host. Determine whether to enable this item based on your actual networking condition. - -- **net.ipv4.conf.all.log\_martians/net.ipv4.conf.default.log\_martians**: logs spoofed, source routed, and redirect packets. - - For security purposes, you are advised to enable this item. The default value is **0**. Set the value to **1** to enable this item. - - After this item is enabled, data from forbidden IP addresses will be logged. Too many new logs will overwrite old logs because the total number of logs allowed is fixed. Determine whether to enable this item based on your actual usage scenario. - -- **net.ipv4.tcp\_timestamps**: disables tcp\_timestamps. - - For security purposes, you are advised to disable tcp\_timestamps. The default value is **1**. Set the value to **0** to disable tcp\_timestamps. - - After this item is disabled, TCP retransmission timeout will be affected. Determine whether to disable this item based on the actual usage scenario. - -- **net.ipv4.tcp\_max\_syn\_backlog**: determines the number of queues that is in SYN\_RECV state. - - This parameter determines the number of queues that is in SYN\_RECV state. When this number is exceeded, new TCP connection requests will not be accepted. This to some extent prevents system resource exhaustion. Configure this parameter based on your actual usage scenario. 
diff --git a/docs/en/docs/SecHarden/os-hardening-overview.md b/docs/en/docs/SecHarden/os-hardening-overview.md deleted file mode 100644 index 44fd1bdd4083b58b5cf375cc9437cdf5874b5bf7..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/os-hardening-overview.md +++ /dev/null @@ -1,121 +0,0 @@ -# OS Hardening Overview -- [OS Hardening Overview](#os-hardening-overview) - - [Security Hardening Purpose](#security-hardening-purpose) - - [Security Hardening Solution](#security-hardening-solution) - - [Security Hardening Impacts](#security-hardening-impacts) - -This chapter describes the purpose and solution of openEuler system hardening. - - - -## security-hardening-purpose - -The OS, as the core of the information system, manages hardware and software resources and is the basis of information system security. Applications must depend on the OS to ensure the integrity, confidentiality, availability, and controllability of information. Without the OS security protection, protective methods against hackers and virus attacks at other layers cannot meet the security requirements. - -Therefore, security hardening is essential for an OS. Security hardening helps build a dynamic and complete security system, enhance product security, and improve product competitiveness. - -## security-hardening-solution - -This section describes the openEuler security hardening solution, including the hardening method and items. - -### Security Hardening Method - -You can manually modify security hardening configurations or run commands to harden the system, or use the security hardening tool to modify security hardening items in batches. The openEuler security hardening tool runs as openEuler-security.service. When the system is started for the first time, the system automatically runs the service to execute the default hardening policy, and automatically set the service not to start as the system starts. 
- -You can modify the **security.conf** file and use the security hardening tool to implement user-defined security hardening. - - -## security hardening impacts - -Security hardening on file permissions and account passwords may change user habits, affecting system usability. For details about common hardening items that affect system usability, see [Table 1](#en-us_topic_0152100325_ta4a48f54ff2849ada7845e2380209917). - -**Table 1** Security hardening impacts - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Item

-

Suggestion

-

Impact

-

Configured By Default

-

Timeout setting on the text-based user interface (TUI)

-

When the TUI is idle for a long period of time, it automatically exits.

-
NOTE:

When a user logs in to the system using SSH, the timeout period is determined by the smaller value of the TMOUT field in the /etc/profile file and the ClientAliveInterval field in the /etc/ssh/sshd_config file. You are advised to set this parameter to 300 seconds.

-
-

If you do not perform any operation on the TUI for a long time, TUI automatically exits.

-

No

-

Password complexity

-

The password is a string containing at least eight characters chosen from three or four of the following types: uppercase letters, lowercase letters, digits, and special characters.

-

All passwords must comply with the complexity requirements.

-

No

-

Password retry limits

-

If a user fails to enter the correct password for three consecutive times when logging in to the OS, the user account will be locked for 60 seconds.

-

After the account is locked, the user can log in to the system only after 60 seconds.

-

Yes

-

Default umask value

-

The default umask value of all users is set to 077 so that the default permission on files created by users is 600 and the default permission on directories is 700.

-

Users must modify the permission on specified files or directories as required.

-

Yes

-

Password validity period

-

The password validity period can be modified in the /etc/login.defs file and is set to 90 days by default. It can be modified in any time. An expiration notification will be displayed seven days before a password is to expire.

-

When a user attempts to log in after the password expires, the user will be informed of the password expiry and is required to change the password. If the user does not change the password, the user cannot access the system.

-

No

-

su permission control

-

The su command is used to switch user accounts. To improve system security, only the user root and users in the wheel group can use the su command.

-

Common users can successfully run the su command only after joining in the wheel group.

-

Yes

-

Disabling user root from logging in using SSH

-

Set the value of the PermitRootLogin field in the /etc/ssh/sshd_config file to no. In this way, user root cannot directly log in to the system using SSH.

-

You need to log in to the system as a common user in SSH mode and then switch to user root.

-

No

-

Strong SSH encryption algorithm

-

The MACs and Ciphers configurations of SSH services support the CTR and SHA2 algorithms and do not support the CBC, MD5, and SHA1 algorithms.

-

Some early Xshell and PuTTY versions do not support aes128-ctr, aes192-ctr, aes256-ctr, hmac-sha2-256, and hmac-sha2-512 algorithms. Ensure that the latest PuTTY (0.63 or later) and Xshell (5.0 or later) are used.

-

Yes

-
- diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-caution.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-danger.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-note.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-notice.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-tip.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/SecHarden/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/public_sys-resources/icon-warning.gif b/docs/en/docs/SecHarden/public_sys-resources/icon-warning.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files 
a/docs/en/docs/SecHarden/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/SecHarden/secHarden.md b/docs/en/docs/SecHarden/secHarden.md deleted file mode 100644 index 6539f6bd1eb52da6f2076345facd9c4759538a79..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/secHarden.md +++ /dev/null @@ -1,5 +0,0 @@ -# Security Hardening Guide - -This document describes how to perform security hardening for openEuler. - -This document is intended for administrators who need to perform security hardening for openEuler. You must be familiar with the OS security architecture and technologies. diff --git a/docs/en/docs/SecHarden/security-hardening-guide.md b/docs/en/docs/SecHarden/security-hardening-guide.md deleted file mode 100644 index 38b3bec79c4e11aaebbd80e5b19204ebfb9edc7a..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/security-hardening-guide.md +++ /dev/null @@ -1,4 +0,0 @@ -# Security Hardening Guide - -You can modify the hardening policy configuration file or script to harden the system. This chapter describes the hardening items, whether the items are hardened by default, and how to perform security hardening. - diff --git a/docs/en/docs/SecHarden/security-hardening-tools.md b/docs/en/docs/SecHarden/security-hardening-tools.md deleted file mode 100644 index 8aecf8637386d110f16e28b56ee14871affff539..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/security-hardening-tools.md +++ /dev/null @@ -1,137 +0,0 @@ -# Security Hardening Tools - -- [Security Hardening Tools](#security-hardening-tools) - - [Security Hardening Procedure](#security-hardening-procedure) - - [Hardening Items Taking Effect](#hardening-items-taking-effect) - - - - - -## Security Hardening Procedure - -### Overview - -You need to modify the **usr-security.conf** file so that the security hardening tool can set hardening policies based on the **usr-security.conf** file. 
This section describes rules for modifying the **usr-security.conf** file. For details about the configurable security hardening items, see [Security Hardening Guide](#security-hardening-guide.md). - -### Precautions - -- After modifying the items, restart the security hardening service for the modification to take effect. For details about how to restart the service, see [Hardening Items Taking Effect](#hardening-items-taking-effect.md). -- When modifying security hardening items, you only need to modify the **/etc/openEuler\_security/usr-security.conf** file. You are not advised to modify the **/etc/openEuler\_security/security.conf** file. The **security.conf** file contains basic hardening items which are executed only once. -- After the security hardening service is restarted for the configuration to take effect, the previous configuration cannot be deleted by deleting the corresponding hardening items from the **usr-security.conf** file and restarting the security hardening service. -- Security hardening operations are recorded in the **/var/log/openEuler-security.log** file. - -### Configuration Format - -Each line in the **usr-security.conf** file indicates a configuration item. The configuration format varies according to the configuration content. The following describes the format of each configuration item. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- All configuration items start with an execution ID. The execution ID is a positive integer and can be customized. ->- Contents of a configuration item are separated by an at sign \(@\). ->- If the actual configuration content contains an at sign \(@\), use two at signs \(@@\) to distinguish the content from the separator. For example, if the actual content is **xxx@yyy**, set this item to **xxx@@yyy**. Currently, an at sign \(@\) cannot be placed at the beginning or end of the configuration content. 
- -   - -- **d**: comment - - Format: _Execution ID_**@d@**_Object file_**@**_Match item_ - - Function: Comment out lines starting with the match item \(the line can start with a space\) in an object file by adding a number sign \(\#\) at the beginning of the line. - - Example: If the execution ID is **401**, comment out lines starting with **%wheel** in the **/etc/sudoers** file. - - ``` - 401@d@/etc/sudoers@%wheel - ``` - - -- **m**: replacement - - Format: _Execution ID_**@m@**_Object file_**@**_Match item_**@**_Target value_ - - Function: Replace lines starting with the match item \(the line can start with a space\) in an object file with _match item_ and _target value_. If the match line starts with spaces, the spaces will be deleted after the replacement. - - Example: If the execution ID is **101**, replace lines starting with **Protocol** in the **/etc/ssh/sshd\_config** file with **Protocol 2**. The spaces after **Protocol** are matched and replaced. - - ``` - 101@m@/etc/ssh/sshd_config@Protocol @2 - ``` - -- **sm**: accurate modification - - Format: _Execution ID_**@sm@**_Object file_**@**_Match item_**@**_Target value_ - - Function: Replace lines starting with the match item \(the line can start with a space\) in an object file with _match item_ and _target value_. If the match line starts with spaces, the spaces are retained after the replacement. This is the difference between **sm** and **m**. - - Example: If the execution ID is **201**, replace lines starting with **size** in the **/etc/audit/hzqtest** file with **size 2048**. 
- - ``` - 201@sm@/etc/audit/hzqtest@size@ 2048 - ``` - - -- **M**: subitem modification - - Format: _Execution ID_**@M@**_Object file_**@**_Match item_**@**_Match subitem__\[@Value of the match subitem\]_ - - Function: Match lines starting with the match item \(the line can start with a space\) in an object file and replace the content starting with the match subitem in these lines with the _match subitem_ and _value of the match subitem_. The value of the match subitem is optional. - - Example: If the execution ID is **101**, find lines starting with **key** in the file and replace the content starting with **key2** in these lines with **key2value2**. - - ``` - 101@M@file@key@key2@value2 - ``` - -- **systemctl**: service management - - Format: _Execution ID_**@systemctl@**_Object service_**@**_Operation_ - - Function: Use **systemctl** to manage object services. The value of **Operation** can be **start**, **stop**, **restart**, or **disable**. - - Example: If the execution ID is **218**, stop the **cups.service**. This provides the same function as running the **systemctl stop cups.service** command. - - ``` - 218@systemctl@cups.service@stop - ``` - -    - -- Other commands - - Format: _Execution ID_**@**_Command_**@**_Object file_ - - Function: Run the corresponding command, that is, run the command line _Command_ _Object file_. - - Example 1: If the execution ID is **402**, run the **rm -f** command to delete the **/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem** file. - - ``` - 402@rm -f @/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem - ``` - - Example 2: If the execution ID is **215**, run the **touch** command to create the **/etc/cron.allow** file. - - ``` - 215@touch @/etc/cron.allow - ``` - - Example 3: If the execution ID is **214**, run the **chown** command to change the owner of the **/etc/at.allow** file to **root:root**. 
- - ``` - 214@chown root:root @/etc/at.allow - ``` - - Example 4: If the execution ID is **214**, run the **chmod** command to remove the **rwx** permission of the group to which the owner of the** /etc/at.allow** file belongs and other non-owner users. - - ``` - 214@chmod og-rwx @/etc/at.allow - ``` - - -## Hardening Items Taking Effect - -After modifying the **usr-security.conf** file, run the following command for the new configuration items to take effect: - -``` -systemctl restart openEuler-security.service -``` - diff --git a/docs/en/docs/SecHarden/selinux-configuration.md b/docs/en/docs/SecHarden/selinux-configuration.md deleted file mode 100644 index 6644c4cc137bcee59b900dfd0333effb3ba907aa..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/selinux-configuration.md +++ /dev/null @@ -1,67 +0,0 @@ -# SELinux Configuration - -## Overview - -Discretionary access control \(DAC\) determines whether a resource can be accessed based on users, groups, and other permissions. It does not allow the system administrator to create comprehensive and fine-grained security policies. SELinux \(Security-Enhanced Linux\) is a module of the Linux kernel and a security subsystem of Linux. SELinux implements mandatory access control \(MAC\). Each process and system resource has a special security label. In addition to the principles specified by the DAC, the SELinux needs to determine whether each type of process has the permission to access a type of resource. - -By default, openEuler uses SELinux to improve system security. SELinux has three modes: - -- **permissive**: The SELinux outputs alarms but does not forcibly execute the security policy. -- **enforcing**: The SELinux security policy is forcibly executed. -- **disabled**: The SELinux security policy is not loaded. - -## Configuration Description - -SELinux is enabled for openEuler by default and the default mode is enforcing. 
You can change the SELinux mode by changing the value of **SELINUX** in **/etc/selinux/config**. - -- To disable the SELinux policy, run the following command: - - ``` - SELINUX=disabled - ``` - -- To use the permissive policy, run the following command: - - ``` - SELINUX=permissive - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->When you switch between the disabled mode and the other mode, you need to restart the system for the switch to take effect. ->``` -># reboot ->``` - -## SELinux Commands - -- Query the SELinux mode. For example, the following shows that the SELinux mode is permissive. - - ``` - # getenforce - Permissive - ``` - -- Set the SELinux mode. **0** indicates the permissive mode, and **1** indicates the enforcing mode. For example, run the following command to set the SELinux mode to enforcing. This command cannot be used to set the disabled mode. After the system is restarted, the mode set in **/etc/selinux/config** is restored. - - ``` - # setenforce 1 - ``` - -- Query the SELinux status. **SELinux status** indicates the SELinux status. **enabled** indicates that SELinux is enabled, and **disabled** indicates that SELinux is disabled. **Current mode** indicates the current security policy of the SELinux. 
- - ``` - # sestatus - SELinux status: enabled - SELinuxfs mount: /sys/fs/selinux - SELinux root directory: /etc/selinux - Loaded policy name: targeted - Current mode: enforcing - Mode from config file: enforcing - Policy MLS status: enabled - Policy deny_unknown status: allowed - Memory protection checking: actual (secure) - Max kernel policy version: 31 - ``` - - diff --git a/docs/en/docs/SecHarden/system-services.md b/docs/en/docs/SecHarden/system-services.md deleted file mode 100644 index 8f90f69253b13d02a285b4075eadb7f22b8bdfd7..0000000000000000000000000000000000000000 --- a/docs/en/docs/SecHarden/system-services.md +++ /dev/null @@ -1,468 +0,0 @@ -# system-services - -- [System Services](#system-services) - - [Hardening the SSH Service](#hardening-the-ssh-service) - - -## hardening-the-ssh-service - -### Description - -The Secure Shell \(SSH\) is a reliable security protocol for remote logins and other network services. SSH prevents information disclosure during remote management. SSH encrypts transferred data to prevent domain name server \(DNS\) spoofing and IP spoofing. OpenSSH was created as an open source alternative to the proprietary SSH protocol. - -Hardening the SSH service is to modify configurations of the SSH service to set the algorithm and authentication parameters when the system uses the OpenSSH protocol, improving the system security. [Table 1](#en-us_topic_0152100390_ta2fdb8e4931b4c1a8f502b3c7d887b95) describes the hardening items, recommended hardening values, and default policies. - -### Implementation - -To harden a server, perform the following steps: - -1. Open the configuration file **/etc/ssh/sshd\_config** of the SSH service on the server, and modify or add hardening items and values in the file. -2. Save the **/etc/ssh/sshd\_config** file. -3. Run the following command to restart the SSH service: - - ``` - systemctl restart sshd - ``` - - -   - -To harden a client, perform the following steps: - -1. 
Open the configuration file **/etc/ssh/ssh\_config** of the SSH service on the client, and modify or add hardening items and values in the file. -2. Save the **/etc/ssh/ssh\_config** file. -3. Run the following command to restart the SSH service: - - ``` - systemctl restart sshd - ``` - - -### Hardening Items - -- Server hardening policies - - All SSH service hardening items are stored in the **/etc/ssh/sshd\_config** configuration file. For details about the server hardening items, hardening suggestions, and whether the hardening items are configured as suggested, see [Table 1](#en-us_topic_0152100390_ta2fdb8e4931b4c1a8f502b3c7d887b95). - - **Table 1** SSH hardening items on a server - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

Suggestion

-

Configured as Suggested

-

Protocol

-

SSH protocol version.

-

2

-

Yes

-

SyslogFacility

-

Log type of the SSH service. The item is set to AUTH, indicating authentication logs.

-

AUTH

-

Yes

-

LogLevel

-

Level for recording SSHD logs.

-

VERBOSE

-

Yes

-

X11Forwarding

-

Specifies whether a GUI can be used after login using SSH.

-

no

-

Yes

-

MaxAuthTries

-

Maximum number of authentication attempts.

-

3

-

No

-

PubkeyAuthentication

-

Specifies whether public key authentication is allowed.

-

yes

-

Yes

-

RSAAuthentication

-

Specifies whether only RSA security authentication is allowed.

-

yes

-

Yes

-

IgnoreRhosts

-

Specifies whether the rhosts and shosts files are used for authentication. The rhosts and shosts files record the names of the servers that support remote access and related login names.

-

yes

-

Yes

-

RhostsRSAAuthentication

-

Specifies whether the RSA algorithm security authentication based on the rhosts file is used. The rhosts file records the names of the servers that support remote access and related login names.

-

no

-

Yes

-

HostbasedAuthentication

-

Specifies whether host-based authentication is used. Host-based authentication indicates that any user of a trusted client can use the SSH service.

-

no

-

Yes

-

PermitRootLogin

-

Specifies whether to allow user root to log in to the system using SSH.

-
NOTE:

If you want to log in to the system using SSH as user root, set the value of the PermitRootLogin field in the /etc/ssh/sshd_config file to yes.

-
-

no

-

No

-

PermitEmptyPasswords

-

Specifies whether accounts with empty passwords can log in.

-

no

-

Yes

-

PermitUserEnvironment

-

Specifies whether to resolve the environment variables set in ~/.ssh/environment and ~/.ssh/authorized_keys.

-

no

-

Yes

-

Ciphers

-

Encryption algorithm of SSH data transmission.

-

aes128-ctr,aes192-ctr,aes256-ctr,chacha20-poly1305@openssh.com,aes128-gcm@openssh.com,aes256-gcm@openssh.com

-

Yes

-

ClientAliveCountMax

-

Timeout count. After the server sends a request, if the number of times that the client does not respond reaches a specified value, the server automatically disconnects from the client.

-

0

-

No

-

Banner

-

File of the prompt information displayed before and after SSH login.

-

/etc/issue.net

-

Yes

-

MACs

-

Hash algorithm for SSH data verification.

-

hmac-sha2-512,hmac-sha2-512-etm@openssh.com,hmac-sha2-256,hmac-sha2-256-etm@openssh.com

-

Yes

-

StrictModes

-

Specifies whether to check the permission on and ownership of the home directory and rhosts file before SSH receives login requests.

-

yes

-

Yes

-

UsePAM

-

Specifies whether to use PAM for login authentication.

-

yes

-

Yes

-

AllowTcpForwarding

-

Specifies whether to allow TCP forwarding.

-

no

-

Yes

-

Subsystem sftp /usr/libexec/openssh/sftp-server

-

SFTP log record level, which records the INFO level and authentication logs.

-

-l INFO -f AUTH

-

Yes

-

AllowAgentForwarding

-

Specifies whether to allow SSH Agent forwarding.

-

no

-

Yes

-

GatewayPorts

-

Specifies whether SSH can connect to ports on the forwarding client.

-

no

-

Yes

-

PermitTunnel

-

Specifies whether Tunnel devices are allowed.

-

no

-

Yes

-

KexAlgorithms

-

SSH key exchange algorithms.

-

curve25519-sha256,curve25519-sha256@@libssh.org,diffie-hellman-group-exchange-sha256

-

Yes

-

LoginGraceTime

-

Time limit for users passing the authentication. 0 indicates no limit. The default value is 60 seconds.

-

60

-

No

-
- - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >By default, the messages displayed before and after SSH login are saved in the **/etc/issue.net** file. The default information in the **/etc/issue.net** file is **Authorized users only.** **All activities may be monitored and reported.** - - -- Client hardening policies - - All SSH service hardening items are stored in the **/etc/ssh/ssh\_config** configuration file. For details about the client hardening items, hardening suggestions, and whether the hardening items are configured as suggested, see [Table 2](#en-us_topic_0152100390_tb289c5a6f1c7420ab4339187f9018ea4). - - **Table 2** SSH hardening items on a client - - - - - - - - - - - - - - - - - - - -

Item

-

Description

-

Suggestion

-

Configured as Suggested

-

KexAlgorithms

-

SSH key exchange algorithms.

-

ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256

-

No

-

VerifyHostKeyDNS

-

Specifies whether to verify HostKey files by using DNS or SSHFP.

-

ask

-

No

-
- - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Third-party clients and servers that use the Diffie-Hellman algorithm are required to allow at least 2048-bit connection. - - -### Other Security Suggestions - -- The SSH service only listens on specified IP addresses. - - For security purposes, you are advised to only listen on required IP addresses rather than listen on 0.0.0.0 when using the SSH service. You can specify the IP addresses that SSH needs to listen on in the ListenAddress configuration item in the **/etc/ssh/sshd\_config** file. - - 1. Open and modify the **/etc/ssh/sshd\_config** file. - - ``` - vi /etc/ssh/sshd_config - ``` - - The following information indicates that the bound listening IP address is **192.168.1.100**. You can change the listening IP address based on the site requirements. - - ``` - ... - ListenAddress 192.168.1.100 - ... - ``` - - 2. Restart the SSH service. - - ``` - systemctl restart sshd.service - ``` - - - -- SFTP users are restricted from access to upper-level directories. - - SFTP is a secure FTP designed to provide secure file transfer over SSH. Users can only use dedicated accounts to access SFTP for file upload and download, instead of SSH login. In addition, directories that can be accessed over SFTP are limited to prevent directory traversal attacks. The configuration process is as follows: - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >In the following configurations, **sftpgroup** is an example user group name, and **sftpuser** is an example username. - - 1. Create an SFTP user group. - - ``` - groupadd sftpgroup - ``` - - 2. Create an SFTP root directory. - - ``` - mkdir /sftp - ``` - - 3. Modify the ownership of and permission on the SFTP root directory. - - ``` - chown root:root /sftp - chmod 755 /sftp - ``` - - 4. Create an SFTP user. - - ``` - useradd -g sftpgroup -s /sbin/nologin sftpuser - ``` - - 5. Set the password of the SFTP user. - - ``` - passwd sftpuser - ``` - - 6. 
Create a directory used to store files uploaded by the SFTP user. - - ``` - mkdir /sftp/sftpuser - ``` - - 7. Modify the ownership of and permission on the upload directory of the SFTP user. - - ``` - chown root:root /sftp/sftpuser - chmod 777 /sftp/sftpuser - ``` - - 8. Modify the **/etc/ssh/sshd\_config** file. - - ``` - vi /etc/ssh/sshd_config - ``` - - Modify the following information: - - ``` - #Subsystem sftp /usr/libexec/openssh/sftp-server -l INFO -f AUTH - Subsystem sftp internal-sftp -l INFO -f AUTH - ... - - Match Group sftpgroup - ChrootDirectory /sftp/%u - ForceCommand internal-sftp - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- **%u** is a wildcard character. Enter **%u** to represent the username of the current SFTP user. - >- The following content must be added to the end of the **/etc/ssh/sshd\_config** file: - > ``` - > Match Group sftpgroup - > ChrootDirectory /sftp/%u - > ForceCommand internal-sftp - > ``` - - 9. Restart the SSH service. - - ``` - systemctl restart sshd.service - ``` - - - -- Remotely execute commands using SSH. - - When a command is executed remotely through OpenSSH, TTY is disabled by default. If a password is required during command execution, the password is displayed in plain text. To ensure password input security, you are advised to add the **-t** option to the command. Example: - - ``` - ssh -t testuser@192.168.1.100 su - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >**192.168.1.100** is an example IP address, and **testuser** is an example username. 
diff --git a/docs/en/docs/StratoVirt/Install_StratoVirt.md b/docs/en/docs/StratoVirt/Install_StratoVirt.md deleted file mode 100644 index 94fb130ecb11e2144fdeb9b8c19d93cec572bc11..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/Install_StratoVirt.md +++ /dev/null @@ -1,39 +0,0 @@ -# Installing StratoVirt - -[[toc]] - -## Software and Hardware Requirements - -### Minimum Hardware Requirements - -- Processor architecture: Only the AArch64 and x86_64 processor architectures are supported. AArch64 requires ARMv8 or a later version and supports virtualization extension. x86_64 supports VT-x. - -- 2-core CPU -- 4 GiB memory -- 16 GiB available disk space - -### Software Requirements - -Operating system: openEuler 21.03 - - - -## Installing Components - -To use StratoVirt virtualization, it is necessary to install StratoVirt. Before the installation, ensure that the openEuler yum source has been configured. - -1. Run the following command as user root to install the StratoVirt components: - - ``` - # yum install stratovirt - ``` - - -2. Check whether the installation is successful. - - ``` - $ stratovirt -version - StratoVirt 0.1.0 - ``` - - diff --git a/docs/en/docs/StratoVirt/Interconnect_isula.md b/docs/en/docs/StratoVirt/Interconnect_isula.md deleted file mode 100644 index 19ab14cca575ce5580335b22b84da15f0b38fb22..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/Interconnect_isula.md +++ /dev/null @@ -1,110 +0,0 @@ -# Connecting to the iSula Security Container - -[[toc]] - -## Overview - -To provide a better isolation environment for containers and improve system security, it is recommended to use the iSula security container, that is, connect StratoVirt to the iSula security container. - -## Connecting to the iSula Security Container - -### **Prerequisites** - -The iSulad and kata-containers have been installed, and the iSulad supports the kata-runtime container runtime and the devicemapper storage driver. 
- -The following describes how to install and configure iSulad and kata-containers. - -1. Configure the yum repo and install iSulad and kata-containers as user **root**. - - ```shell - # yum install iSulad - # yum install kata-containers - ``` - -2. Create and configure a storage device. - - Plan the drive in advance, for example, /dev/sdx, which will be formatted. - - ```shell - # pvcreate /dev/sdxx - # vgcreate isulaVG0 /dev/sdxx - # lvcreate --wipesignatures y -n thinpool isulaVG0 -l 95%VG - # lvcreate --wipesignatures y -n thinpoolmeta isulaVG0 -l 1%VG - # lvconvert -y --zero n -c 512K --thinpool isulaVG0/thinpool --poolmetadata isulaVG0/thinpoolmeta - ``` - - Add the following information to the **/etc/lvm/profile/isulaVG0-thinpool.profile** file: - - ``` - activation { - thin_pool_autoextend_threshold=80 - thin_pool_autoextend_percent=20 - } - ``` - - Modify storage-driver and storage-opts in the **/etc/isulad/daemon.json** configuration file as follows. Set the default storage driver type **overlay** to **devicemapper**. - - ``` - "storage-driver": "devicemapper", - "storage-opts": [ - "dm.thinpooldev=/dev/mapper/isulaVG0-thinpool", - "dm.fs=ext4", - "dm.min_free_space=10%" - ], - ``` - -3. Restart iSulad. - - ```shell - # systemctl daemon-reload - # systemctl restart isulad - ``` - -4. Check whether the iSula storage driver is successfully configured. - - ```shell - # isula info - ``` - - If the following information is displayed, the configuration is successful: - - ``` - Storage Driver: devicemapper - ``` - -### **Connection Guide** - -StratoVirt connects to the iSula security container, that is, StratoVirt connects to kata-runtime in the iSula security container. The procedure is as follows: - -1. Modify the kata configuration file (the default path is **/usr/share/defaults/kata-containers/configuration.toml**). 
Set the hypervisor type of the secure container to **stratovirt**, the kernel to the absolute path of the StratoVirt kernel image, and initrd to the initrd image file of kata-containers. When using yum to install kata-containers, the two image files are downloaded by default and stored in the **/var/lib/kata/** directory. Other images are permitted as well. - - The configuration is as follows: - - ```shell - [hypervisor.stratovirt] - path = "/usr/bin/stratovirt" - kernel = "/var/lib/kata/vmlinux.bin" - initrd = "/var/lib/kata/kata-containers-initrd.img" - block_device_driver = "virtio-mmio" - use_vsock = true - enable_netmon = true - internetworking_model="tcfilter" - sandbox_cgroup_with_emulator = false - disable_new_netns = false - disable_block_device_use = false - disable_vhost_net = true - ``` - -2. Use the root permission and **isula** command to run the BusyBox security container to connect the StratoVirt to the security container. - - ```shell - # isula run -tid --runtime=kata-runtime --net=none --name test busybox:latest sh - ``` - -3. Run the **isula ps** command to check whether the test security container is running properly. If yes, run the following command to access the test container: - - ``` - # isula exec -ti test sh - ``` - -Now, it is feasible to run container commands in the test container. \ No newline at end of file diff --git a/docs/en/docs/StratoVirt/Manage_life_cycle.md b/docs/en/docs/StratoVirt/Manage_life_cycle.md deleted file mode 100644 index a859b7471175bba9f9a2d4d4e855b3e4df898ce2..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/Manage_life_cycle.md +++ /dev/null @@ -1,124 +0,0 @@ -# Managing the VM Lifecycle - -[[toc]] - -## Overview - -This section describes how to use StratoVirt to manage the lifecycle of a VM, namely starting, pausing, resuming, and exiting a VM. 
- - - -## Creating and Starting a VM - -As described in the section "Configuring a VM", users can specify the VM configuration by using command line parameters or the JSON file, and run the stratovirt command on the host to create and start a VM. - -- Run the following command to create and start a VM: - -``` -$/path/to/stratovirt - [Parameter 1] [Parameter Option] - [Parameter 2] [Parameter Option]... -``` - - - -- Use the JSON file to provide the VM configuration. The command for creating and starting a VM is as follows: - -``` -$ /path/to/stratovirt \ - -config /path/to/json \ - -api-channel unix:/path/to/socket -``` - -Where, /path/to/json indicates the path of the JSON configuration file. /path/to/socket is the socket file specified by the user (for example, /tmp/stratovirt.socket). After the command is executed, the socket file is automatically created. Ensure that the socket file does not exist before executing the command, so that the VM can be started properly. - - - -> ![](./figures/en-05.png) -> -> After the VM is started, there are two NICs: eth0 and eth1. The two NICs are reserved for hot plugging: eth0 first, and then eth1. Currently, only two virtio-net NICs can be hot-plugged. - - - -## Connecting a VM - -StratoVirt uses QMP to manage VMs. To pause, resume, and exit a VM, connect it to StratoVirt through QMP first. - -Open a new CLI (CLI B) on the host and run the following command to perform the api-channel connection: - -``` -$ ncat -U /path/to/socket -``` - -After the connection is set up, a greeting message will be received from StratoVirt, as shown in the following figure. - -``` -{"QMP":{"version":{"qemu":{"micro":1,"minor":0,"major":4},"package":""},"capabilities":[]}} -``` - -Now, manage the VM by entering QMP commands in CLI B. - - - -> ![](./figures/en-05.png) -> -> QMP provides stop, cont, quit, and query-status to manage and query the VM status. -> -> All QMP commands for managing VMs are entered in CLI B. 
`<-` indicates the command input, and `->` indicates the QMP returned result. - - - - - -## Pausing a VM - -QMP provides the stop command for pausing a VM, that is, pausing all vCPUs of the VM. Command format: - -**{"execute":"stop"}** - -**Example:** - -Run the stop command to pause the VM. The command output is as follows: - -``` -<- {"execute":"stop"} --> {"event":"STOP","data":{},"timestamp":{"seconds":1583908726,"microseconds":162739}} --> {"return":{}} -``` - - - - - -## Resuming a VM - -QMP provides the cont command to resume a VM, that is, to resume all vCPUs of the VM. Command format: - -**{"execute":"cont"}** - -**Example:** - -Run the cont command to resume the VM. The command output is as follows: - -``` -<- {"execute":"cont"} --> {"event":"RESUME","data":{},"timestamp":{"seconds":1583908853,"microseconds":411394}} --> {"return":{}} -``` - - - - - -## Exiting a VM - -QMP provides the quit command to exit a VM, that is, to exit the StratoVirt process. Command format: - -**{"execute":"quit"}** - -**Example:** - -``` -<- {"execute":"quit"} --> {"event":"SHUTDOWN","data":{"guest":false,"reason":"host-qmp-quit"},"timestamp":{"ds":1590563776,"microseconds":519808}} --> {"return":{}} -``` - diff --git a/docs/en/docs/StratoVirt/Manage_resource.md b/docs/en/docs/StratoVirt/Manage_resource.md deleted file mode 100644 index 9bafea1cda504e634ae6412a2f593bdf223ae2f6..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/Manage_resource.md +++ /dev/null @@ -1,138 +0,0 @@ -#Managing VM resources - -[[toc]] - -## Overview - -This section describes how to use QMP commands to manage disks and NICs. - - - -> ![](./figures/en-05.png) -> -> StratoVirt uses QMP to manage VMs. Before using QMP to manage VM resources, use it to connect StratoVirt to the VM. For details, see "Managing the VM Life Cycle". - - - -## Hot-Pluggable Hard Disks - -StratoVirt supports adjusting the number of disks during VM running. 
That is, you can add or delete VM disks without interrupting services. - -### Hot Plugged-in Disk - -**Usage** - -``` -{"execute": "blockdev-add", "arguments": {"node-name": "drive-0", "file": {"driver": "file", "filename": "/path/to/block"}, "cache": {"direct": true}, "read-only": false}} -{"execute": "device_add", "arguments": {"id": "drive-0", "driver": "virtio-blk-mmio", "addr": "0x1"}} -``` - -**Parameter** - -- The value of node-name in blockdev-add must be the same as the value of id in device_add. They are both drive-0. - -- /path/to/block is the mirror path of the hot plugged-in disk. It cannot be the path of the disk image that boots the rootfs. -- For addr, 0x0 is mapped to vda of the VM, 0x1 is mapped to vdb, and so on. To be compatible with the QMP protocol, "addr" can be replaced by "lun", but lun=0 is mapped to the vdb of the client. Only six virtio-blk disks can be hot added. - -**Example** - -``` -<- {"execute": "blockdev-add", "arguments": {"node-name": "drive-0", "file": {"driver": "file", "filename": "/path/to/block"}, "cache": {"direct": true}, "read-only": false}} --> {"return": {}} -<- {"execute": "device_add", "arguments": {"id": "drive-0", "driver": "virtio-blk-mmio", "addr": "0x1"}} --> {"return": {}} -``` - - - -### Hot Plugged-out Disk - -**Usage** - -**{"execute": "device_del", "arguments": {"id":"drive-0"}}** - -**Parameter** - -id indicates the ID of the hot plugged-out disk. - -**Example** - -``` -<- {"execute": "device_del", "arguments": {"id": "drive-0"}} --> {"event":"DEVICE_DELETED","data":{"device":"drive-0","path":"drive-0"},"timestamp":{"seconds":1598513162,"microseconds":367129}} --> {"return": {}} -``` - - - -## Hot-Pluggable NIC - -StratoVirt allows users to adjust the number of NICs during VM running. That is, users can add or delete NICs for VMs without interrupting services. - -### Hot Plugged-in NIC - -**Preparations (Requiring the Root Permission)** - -1. Create and enable a Linux bridge. 
For example, if the bridge name is qbr0, run the following command: - - ```shell - # brctl addbr qbr0 - # ifconfig qbr0 up - ``` - -2. Create and enable a tap device. For example, if the tap device name is **tap0**, run the following command: - - ```shell - # ip tuntap add tap0 mode tap - # ifconfig tap0 up - ``` - -3. Add the tap device to the bridge. - - ```shell - # brctl addif qbr0 tap0 - ``` - - ​ - -**Usage** - -``` -{"execute":"netdev_add", "arguments":{"id":"net-0", "ifname":"tap0"}} -{"execute":"device_add", "arguments":{"id":"net-0", "driver":"virtio-net-mmio", "addr":"0x0"}} -``` - -**Parameter** - -- The ID in netdev_add must be the same as that in device_add. Ifname indicates the name of the TAP device. - -- For addr, 0x0 is mapped to eth0 of the VM, and 0x1 to eth1. Only two virtio-net NICs can be hot plugged in. - - -**Example** - -``` -<- {"execute":"netdev_add", "arguments":{"id":"net-0", "ifname":"tap0"}} -<- {"execute":"device_add", "arguments":{"id":"net-0", "driver":"virtio-net-mmio", "addr":"0x0"}} -``` - -Where, addr:0x0 corresponds to eth0 in the VM. - -### Hot Plugged-out NIC - -**Usage** - -**{"execute": "device_del", "arguments": {"id": "net-0"}}** - -**Parameter** - -id: specifies the NIC ID, for example, net-0. - -**Example** - -``` -<- {"execute": "device_del", "arguments": {"id": "net-0"}} --> {"event":"DEVICE_DELETED","data":{"device":"net-0","path":"net-0"},"timestamp":{"seconds":1598513339,"microseconds":97310}} --> {"return": {}} -``` - diff --git a/docs/en/docs/StratoVirt/Prepare_env.md b/docs/en/docs/StratoVirt/Prepare_env.md deleted file mode 100644 index 53577626b9e258a44abb2d3fb24b5d6612fc329a..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/Prepare_env.md +++ /dev/null @@ -1,148 +0,0 @@ -# Preparing the Environment - -[[toc]] - -## Usage - -- StratoVirt supports only Linux VMs that use the x86_64 or AArch64 processor architecture and start the VM with same architecture. 
-- It is advised to compile, commission, and deploy StratoVirt on openEuler 21.03. -- StratoVirt can run with non-root permissions. - -## Environment Requirements - -The following environment is required for running StratoVirt: - -- /dev/vhost-vsock device (for implementing the MMIO) -- Nmap tool -- Kernel image and rootfs image - - - -## Preparing Devices and Tools - -- StratoVirt needs to implement the MMIO device. Therefore, before running StratoVirt, ensure that the `/dev/vhost-vsock` device exists. - - Check whether the device exists. - - ``` - $ ls /dev/vhost-vsock - /dev/vhost-vsock - ``` - - If the device does not exist, run the following command to generate the /dev/vhost-vsock device: - - ``` - $ modprobe vhost_vsock - ``` - - -- To use QMP commands, install the nmap tool. After configuring the yum source, run the following command to install the nmap tool: - - ``` - # yum install nmap - ``` - -## Preparing Images - -### Creating the Kernel Image - -The StratoVirt of the current version supports only the PE kernel image of the x86_64 and AArch64 platforms. The kernel image in PE format can be generated by using the following method: - -1. Run the following command to obtain the kernel source code of the openEuler: - - ``` - $ git clone https://gitee.com/openeuler/kernel - $ cd kernel - ``` - -2. Run the following command to check and switch the kernel version to 4.19: - - ``` - $ git checkout kernel-4.19 - ``` - -3. Configure and compile the Linux kernel. It is better to use the recommended configuration file ([Obtain configuration file](https://gitee.com/openeuler/stratovirt/tree/master/docs/kernel_config)). Copy it to the kernel directory, and rename it as `.config`. You can also run the following command to configure the kernel as prompted: - - ``` - $ make menuconfig - ``` - -4. Run the following command to create and convert the kernel image to the PE format. The converted image is vmlinux.bin. 
- - ``` - $ make -j vmlinux && objcopy -O binary vmlinux vmlinux.bin - ``` - - After the compilation is complete, the kernel image vmlinux.bin is generated in the current directory. - - ​ - -## Creating the Rootfs Image - -The rootfs image is a file system image. When the StratoVirt is started, the ext4 image with init can be loaded. To create an ext4 rootfs image, perform the following steps: - -1. Prepare a file with a proper size (for example, create a file with the size of 10 GiB in /home). - - ``` - $ cd /home - $ dd if=/dev/zero of=./rootfs.ext4 bs=1G count=10 - ``` - -2. Create an empty ext4 file system on this file. - - ``` - $ mkfs.ext4 ./rootfs.ext4 - ``` - -3. Mount the file image. Create the /mnt/rootfs directory and mount rootfs.ext4 to the /mnt/rootfs directory as user root. - - ``` - $ mkdir /mnt/rootfs - $ cd /home - $ sudo mount ./rootfs.ext4 /mnt/rootfs && cd /mnt/rootfs - ``` - -4. Obtain the latest alpine-mini rootfs of the corresponding processor architecture. - - - If the AArch64 processor architecture is used, run the following command: - - ``` - $ wget http://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/aarch64/alpine-minirootfs-3.12.0-aarch64.tar.gz - $ tar -zxvf alpine-minirootfs-3.12.0-aarch64.tar.gz - $ rm alpine-minirootfs-3.12.0-aarch64.tar.gz - ``` - - - - For the x86_64 processor architecture, run the following command: - - ``` - $ wget http://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/x86_64/alpine-minirootfs-3.12.0-x86_64.tar.gz - $ tar -zxvf alpine-minirootfs-3.12.0-x86_64.tar.gz - $ rm alpine-minirootfs-3.12.0-x86_64.tar.gz - ``` - - -5. Run the following command to create a simple /sbin/init for the ext4 file image: - - ``` - $ rm sbin/init; touch sbin/init && cat > sbin/init < { "return": { "running": true,"singlestep": false,"status": "running" } -``` - - - -## Querying Topology Information - -Run the query-cpus command to query the topology of all CPUs. 
- -- Usage: - -**{ "execute": "query-cpus" }** - -- Example: - -``` -<- { "execute": "query-cpus" } --> {"return":[{"CPU":0,"arch":"x86","current":true,"halted":false,"props":{"core-id":0,"socket-id":0,"thread-id":0},"qom_path":"/machine/unattached/device[0]","thread_id":8439},{"CPU":1,"arch":"x86","current":true,"halted":false,"props":{"core-id":0,"socket-id":1,"thread-id":0},"qom_path":"/machine/unattached/device[1]","thread_id":8440}]} -``` - -## Querying vCPU Online Status - -Run the query-hotpluggable-cpus command to query the online or offline status of all vCPUs. - -- Usage: - -**{ "execute": "query-hotpluggable-cpus" }** - -- Example: - -``` -<- { "execute": "query-hotpluggable-cpus" } --> {"return":[{"props":{"core-id":0,"socket-id":0,"thread-id":0},"qom-path":"/machine/unattached/device[0]","type":"host-x86-cpu","vcpus-count":1},{"props":{"core-id":0,"socket-id":1,"thread-id":0},"qom-path":"/machine/unattached/device[1]","type":"host-x86-cpu","vcpus-count":1}]} -``` - -Where, online vCPUs have the `qom-path` item, while offline vCPUs do not. diff --git a/docs/en/docs/StratoVirt/StratoVirt_Intoduction.md b/docs/en/docs/StratoVirt/StratoVirt_Intoduction.md deleted file mode 100644 index 685ddd668f4faf7dba5102bf3f97f240a5697bf8..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/StratoVirt_Intoduction.md +++ /dev/null @@ -1,25 +0,0 @@ -# Introduction to StratoVirt - -[[toc]] - -## Overview - -StratoVirt is an enterprise-class virtualization platform for cloud data centers in the computing industry, enabling a unified architecture that supports virtual machines, containers and Serverless scenarios. On top of that, StratoVirt has a key technology competitive advantage in terms of lightweight, low noise, hard and soft collaboration, Rust language-level security, and more. StratoVirt reserves the capabilities and interfaces for component assembly in architectural design and interfaces. 
More importantly, StratoVirt has the flexibility to assemble advanced features on demand until it evolves to support standard virtualization, finding the best balance between feature requirements, application scenarios, and light flexibility. - - - -## Architecture Description - -The StratoVirt core architecture is divided into three layers from top to bottom: - -- OCI: compatible with the QEMU Machine Protocol (QMP), which has complete OCI compatibility capabilities. -- BootLoader: discards the traditional BIOS+GRUB boot mode and implements a lighter and faster bootloader. -- MicroVM: virtualization layer, which fully leverages the capability of software and hardware collaboration to simplify the device model and the capability of low-latency resource scaling. - -The overall architecture is shown in **Figure 1**. - -**Figure 1** Overall architecture of StratoVirt - -![](./figures/arc.png) - - diff --git a/docs/en/docs/StratoVirt/StratoVrit_guidence.md b/docs/en/docs/StratoVirt/StratoVrit_guidence.md deleted file mode 100644 index 461f0bf0490f0a18176972f10c4ea8f7edee1491..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/StratoVrit_guidence.md +++ /dev/null @@ -1,4 +0,0 @@ -# StratoVirt Virtualization User Guide - -This document describes StratoVirt virtualization, providing instructions on how to install StratoVirt based on openEuler and how to use StratoVirt virtualization. The purpose is to help users learn about StratoVirt and guide users and administrators to install and use StratoVirt. 
- diff --git a/docs/en/docs/StratoVirt/VM_configuration.md b/docs/en/docs/StratoVirt/VM_configuration.md deleted file mode 100644 index 55d47b434a04e42f85bf44b7aee386d666f178e6..0000000000000000000000000000000000000000 --- a/docs/en/docs/StratoVirt/VM_configuration.md +++ /dev/null @@ -1,232 +0,0 @@ -# Configuring a VM - -## Overview - -Unlike Libvirt, which uses XML files to configure VMs, StratoVirt can use command line parameters or the JSON file to configure the VM CPU, memory, and disk information. This section describes the two configuration methods. - -> ![](./figures/en-05.png) -> -> If both methods can be used, the command line configuration is preferred. -> -> In this document, /path/to/socket is the socket file in the user-defined path. - - - - - -## Specifications - -- Number of VM CPUs: [1,254] -- VM memory size: [128MiB,512GiB] -- Number of VM disks (including hot swap disks): [0,6] -- Number of VM NICs (including hot swap NICs): [0,2] -- The VM console device supports only a single connection. -- On the x86_64 platform, a maximum of 11 mmio devices can be configured. But a maximum of two other devices except disks and NICs is recommended. On the AArch64 platform, the maximum number of mmio devices is 160, but the number of other devices is recommended to be at most 12, also excluding disks and NICs. - -## Minimum Configuration - -The minimum configuration of StratoVirt is as follows: - -- There is a Linux kernel file in PE format. -- Set the rootfs image as the virtio-blk device and add it to kernel parameters. -- Use api-channel to control StratoVirt. -- If you want to use ttyS0 for login, add a serial port to the startup command line and add ttyS0 to kernel parameters. - - - -## Command Line Configuration - -**Overview** - -Command line configuration directly specifies the VM configuration content using command line parameters. 
- -**Command Format** - -The format of the command configured by running the cmdline command is as follows: - -**$ /path/to/stratovirt** *-[Parameter 1] [Parameter Option] -[Parameter 2] [Parameter Option] ...* - -**Usage** - -1. To ensure that the socket required by api-channel can be created, run the following command to clear the environment: - - ``` - $rm [parameter] [user-defined socket file path] - ``` - - -2. Run the cmdline command. - - ``` - $ /path/to/stratovirt -[Parameter 1] [Parameter Option] -[Parameter 2] [Parameter Option] ... - ``` - - -**Parameter Description** - -The following table lists the parameters of the cmdline command. - -**Table 1** Description of command line configuration parameters - -| Parameter | Value | Description | -| ---------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| -name | *VMName* | Configures the VM name (a string of 1 to 255 characters).| -| -kernel | /path/to/vmlinux.bin | Configures the kernel image.| -| -append | console=ttyS0 root=/dev/vda reboot=k panic=1 | Configures kernel command line parameters.| -| -initrd | /path/to/initrd.img | Configures the initrd file.| -| -smp | [cpus=] Number of CPUs | Configures the number of CPUs. The value range is [1,254].| -| -m | Byte/MiB/GiB | Configures the memory size. The value range is [128MiB,512GiB]. | -| -drive | id=rootfs,file=/path/to/rootfs[,readonly=false,direct=true,serial=serial_num] | Configures the virtio-blk device.| -| -netdev | id=iface_id,netdev=tap0[,mac=mac_address] | Configures the virtio-net device.| -| -chardev | id=console_id,path=/path/to/socket | Configures virtio-console. Ensure that the socket file does not exist before running the command.| -| -device | vsock,id=vsock_id,guest-cid=3 | Configures vhost-vsock.| -| -api-channel | unix:/path/to/socket | Configures api-channel. 
Before running this command, ensure that the socket file does not exist.| -| -serial | stdio | Configures a serial port device.| -| -D | /path/to/logfile | Configures log files.| -| -pidfile | /path/to/pidfile | Configures the PID file. This parameter must be used together with -daemonize.| -| -disable-seccomp | N/A | Disables the Seccomp, which is enabled by default.| -| -omit_vm_memory | N/A | Do not dump the VM memory when the process enters the panic state.| -| -daemonize | N/A | Enables the daemon process.| - - - -**Example** - -1. Delete the socket file to ensure that the api-channel can be created. - - ``` - $ rm -f /tmp/stratovirt.socket - ``` - - -2. Run StratoVirt. - - ``` - $ /path/to/stratovirt \ - -kernel /path/to/vmlinux.bin \ - -append console=ttyS0 root=/dev/vda rw reboot=k panic=1 \ - -drive file=/home/rootfs.ext4,id=rootfs,readonly=false \ - -api-channel unix:/tmp/stratovirt.socket \ - -serial stdio - ``` - - After the running is successful, the VM is created and started based on the specified configuration parameters. - - - -## JSON Configuration - - - -**Overview** - -Configuration using the JSON file indicates that when running StratoVirt to create a VM, the system reads the specified JSON file that contains the VM configuration. - -**Command Format** - -The format of the command for configuring a VM using the JSON file is as follows. In this command, /path/to/json indicates the path of the corresponding file. - -**$ /path/to/stratovirt -config** */path/to/json -[Parameter] [Parameter Option]* - -**Usage** - -1. Create a JSON file and write the VM configuration to the file. - -2. Run the StratoVirt command to create a VM. - - ``` - $ /path/to/stratovirt -config /path/to/json - [Parameter] [Parameter Option] - ``` - -**Parameter Description** - -The following table describes the configurable parameters in the JSON file. 
- -**Table 2** Parameters in the configuration file - -| Parameter | Value | Description | -| -------------- | ------------------------------------------------------------ | ---------------------------------------------------- | -| boot-source | "kernel_image_path": "/path/to/vmlinux.bin","boot_args": "console=ttyS0 reboot=k panic=1 pci=off tsc=reliable ipv6.disable=1 root=/dev/vda quiet","initrd_fs_path": "/path/to/initrd.img" | Configures the kernel image and kernel parameters. The `initrd_fs_path` parameter is optional. | -| machine-config | "name": "abc","vcpu_count": 4,"mem_size": 805306368,"omit_vm_memory": true | Configures the virtual CPU and memory size. The `omit_vm_memory` parameter is optional. | -| drive | "drive_id": "rootfs","path_on_host": "/path/to/rootfs.ext4","read_only": false,"direct": true,"serial_num": "xxxxx" | Configures the virtio-blk disk. The `serial_num` parameter is optional. | -| net | "iface_id": "net0","host_dev_name": "tap0","mac": "xx:xx:xx:xx:xx:xx" | Configures the virtio-net NIC. The `mac` parameter is optional. | -| console | "console_id": "charconsole0","socket_path": "/path/to/socket" | Configures the virtio-console serial port. Before running the serial port, ensure that the socket file does not exist. | -| vsock | "vsock_id": "vsock0","guest_cid": 3 | Configures the virtio-vsock device. | -| serial | "stdio": true | Configures a serial port device.| - - - -The following table lists the parameters running in JSON. - -**Table 3** Parameters running in JSON - -| Parameter | Value | Description | -| ---------------- | -------------------- | ------------------------------------------------------------ | -| -config | /path/to/json | Configures the file path.| -| -api-channel | unix:/path/to/socket | Configures api-channel. Before running this command, ensure that the socket file does not exist. 
| -| -D | /path/to/logfile | Configures log files.| -| -pidfile | /path/to/pidfile | Configures the PID file, which must be used together with daemonize. Before running the command, make sure that the PID file does not exist. | -| -disable-seccomp | N/A | Disables the Seccomp, which is enabled by default. | -| -daemonize | N/A | Enables the daemon process.| - - - -**Example** - -1. Create a JSON file, for example, /home/config.json. The file content is as follows: - -``` -{ - "boot-source": { - "kernel_image_path": "/path/to/vmlinux.bin", - "boot_args": "console=ttyS0 reboot=k panic=1 pci=off tsc=reliable ipv6.disable=1 root=/dev/vda quiet" - }, - "machine-config": { - "name": "abc", - "vcpu_count": 2, - "mem_size": 268435456, - "omit_vm_memory": false - }, - "drive": [ - { - "drive_id": "rootfs", - "path_on_host": "/path/to/rootfs.ext4", - "direct": true, - "read_only": false, - "serial_num": "abcd" - } - ], - "net": [ - { - "iface_id": "net0", - "host_dev_name": "tap0", - "mac": "0e:90:df:9f:a8:88" - } - ], - "console": { - "console_id": "charconsole0", - "socket_path": "/path/to/console.socket" - }, - "serial": { - "stdio": true - }, - "vsock": { - "vsock_id": "vsock-123321132", - "guest_cid": 4 - } -} - -``` - - - -2. Run StratoVirt to read the JSON file and create and start the VM. - -``` -$ /path/to/stratovirt \ - -config /home/config.json \ - -api-channel unix:/tmp/stratovirt.socket -``` - -Successful execution of the command indicates that the VM is successfully created and started. 
- diff --git a/docs/en/docs/StratoVirt/figures/arc.png b/docs/en/docs/StratoVirt/figures/arc.png deleted file mode 100644 index baf5526d077a452c9d8a18af38638c8db9150d27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/StratoVirt/figures/arc.png and /dev/null differ diff --git a/docs/en/docs/StratoVirt/figures/en-05.png b/docs/en/docs/StratoVirt/figures/en-05.png deleted file mode 100644 index ad5ed3f7beeb01e6a48707c4806606b41d687e22..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/StratoVirt/figures/en-05.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/appendix.md b/docs/en/docs/Virtualization/appendix.md deleted file mode 100644 index 4277aa8ac9e0afb6a7e8bfa89764e7a8762708f6..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/appendix.md +++ /dev/null @@ -1,145 +0,0 @@ -# Appendix - -- [Appendix](#appendix.md) - - [Terminology & Acronyms and Abbreviations](#terminology-acronyms-and-abbreviations) - - -## Terminology & Acronyms and Abbreviations - -For the terminology & acronyms and abbreviation used in this document, see [Table 1](#table201236162279) and [Table 2](#table1423422319271). - -**Table 1** Terminology - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Term

-

Description

-

AArch64

-

AArch64 is an execution state of the ARMv8 architecture. AArch64 is not only an extension of the 32-bit ARM architecture, but also a brand new architecture in ARMv8 that uses the brand new A64 instruction set.

-

Domain

-

A collection of configurable resources, including memory, vCPUs, network devices, and disk devices. Run the VM in the domain. A domain is allocated with virtual resources and can be independently started, stopped, and restarted.

-

Libvirt

-

A set of tools used to manage virtualization platforms, including KVM, QEMU, Xen, and other virtualization platforms.

-

Guest OS

-

The OS running on the VM.

-

Host OS

-

The OS of the virtual physical machine.

-

Hypervisor

-

Virtual machine monitor (VMM), is an intermediate software layer that runs between a basic physical server and an OS. It allows multiple OSs and applications to share hardware.

-

VM

-

A complete virtual computer system that is constructed by using the virtualization technology and simulating the functions of a complete computer hardware system through software.

-
- -**Table 2** Acronyms and abbreviations - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Acronyms and abbreviations

-

Full spelling

-

Full name

-

Description

-

NUMA

-

Non-Uniform Memory Access Architecture

-

Non Uniform Memory Access Architecture

-

NUMA is a memory architecture designed for multi-processor computers. Under NUMA, a processor accesses its own local memory faster than accessing non-local memory (the memory is located on another processor, or the memory shared between processors).

-

KVM

-

Kernel-based Virtual Machine

-

Kernel-based VM

-

KVM is a kernel-based VM. It is a kernel module of Linux and makes Linux a hypervisor.

-

OVS

-

Open vSwitch

-

Open vSwitch

-

OVS is a high-quality multi-layer vSwitch that uses the open-source Apache 2.0 license protocol.

-

QEMU

-

Quick Emulator

-

Quick Emulator

-

QEMU is a general-purpose, open-source emulator that implements hardware virtualization.

-

SMP

-

Symmetric Multi-Processor

-

Symmetric Multi-Processor

-

SMP is a multi-processor computer hardware architecture. Currently, most processor systems use a symmetric multi-processor architecture. The architecture system has multiple processors, each processor shares the memory subsystem and bus structure.

-

UEFI

-

Unified Extensible Firmware Interface

-

Unified Extensible Firmware Interface

-

A standard that describes new interfaces in detail. This interface is used by the OS to automatically load the prestart operation environment to an OS.

-

VM

-

Virtual Machine

-

VM

-

A complete virtual computer system that is constructed by using the virtualization technology and simulating the functions of a complete computer hardware system through software.

-

VMM

-

Virtual Machine Monitor

-

VM Monitor

-

An intermediate software layer that runs between a basic physical server and an OS. It allows multiple OSs and applications to share hardware.

-
- diff --git a/docs/en/docs/Virtualization/best-practices.md b/docs/en/docs/Virtualization/best-practices.md deleted file mode 100644 index a3debf11f144805f152f838b11e6afa3422bcf2f..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/best-practices.md +++ /dev/null @@ -1,657 +0,0 @@ -# Best Practices - -[[toc]] - -## Performance Best Practices - -### Halt-Polling - -#### Overview - -If compute resources are sufficient, the halt-polling feature can be used to enable VMs to obtain performance similar to that of physical machines. If the halt-polling feature is not enabled, the host allocates CPU resources to other processes when the vCPU exits due to idle timeout. When the halt-polling feature is enabled on the host, the vCPU of the VM performs polling when it is idle. The polling duration depends on the actual configuration. If the vCPU is woken up during the polling, the vCPU can continue to run without being scheduled from the host. This reduces the scheduling overhead and improves the VM system performance. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The halt-polling mechanism ensures that the vCPU thread of the VM responds in a timely manner. However, when the VM has no load, the host also performs polling. As a result, the host detects that the CPU usage of the vCPU is high, but the actual CPU usage of the VM is not high. - -#### Instructions - -The halt-polling feature is enabled by default. You can dynamically change the halt-polling time of vCPU by modifying the **halt\_poll\_ns** file. The default value is **500000**, in ns. - -For example, to set the polling duration to 400,000 ns, run the following command: - -``` -# echo 400000 > /sys/module/kvm/parameters/halt_poll_ns -``` - -### I/O Thread Configuration - -#### Overview - -By default, QEMU main threads handle backend VM read and write operations on the KVM. This causes the following issues: - -- VM I/O requests are processed by a QEMU main thread. 
Therefore, the single-thread CPU usage becomes the bottleneck of VM I/O performance. -- The QEMU global lock \(qemu\_global\_mutex\) is used when VM I/O requests are processed by the QEMU main thread. If the I/O processing takes a long time, the QEMU main thread will occupy the global lock for a long time. As a result, the VM vCPU cannot be scheduled properly, affecting the overall VM performance and user experience. - -You can configure the I/O thread attribute for the virtio-blk disk or virtio-scsi controller. At the QEMU backend, an I/O thread is used to process read and write requests of a virtual disk. The mapping relationship between the I/O thread and the virtio-blk disk or virtio-scsi controller can be a one-to-one relationship to minimize the impact on the QEMU main thread, enhance the overall I/O performance of the VM, and improve user experience. - -#### Configuration Description - -To use I/O threads to process VM disk read and write requests, you need to modify VM configurations as follows: - -- Configure the total number of high-performance virtual disks on the VM. For example, set **** to **4** to control the total number of I/O threads. - - ``` - - VMName - 4194304 - 4194304 - 4 - 4 - ``` - -- Configure the I/O thread attribute for the virtio-blk disk. **** indicates I/O thread IDs. The IDs start from 1 and each ID must be unique. The maximum ID is the value of ****. For example, to allocate I/O thread 2 to the virtio-blk disk, set parameters as follows: - - ``` - - - - -
- - ``` - -- Configure the I/O thread attribute for the virtio-scsi controller. For example, to allocate I/O thread 2 to the virtio-scsi controller, set parameters as follows: - - ``` - - - -
- - ``` - -- Bind I/O threads to a physical CPU. - - Binding I/O threads to specified physical CPUs does not affect the resource usage of vCPU threads. **** indicates I/O thread IDs, and **** indicates IDs of the bound physical CPUs. - - ``` - - - - - ``` - - -### Raw Device Mapping - -#### Overview - -When configuring VM storage devices, you can use configuration files to configure virtual disks for VMs, or connect block devices \(such as physical LUNs and LVs\) to VMs for use to improve storage performance. The latter configuration method is called raw device mapping \(RDM\). Through RDM, a virtual disk is presented as a small computer system interface \(SCSI\) device to the VM and supports most SCSI commands. - -RDM can be classified into virtual RDM and physical RDM based on backend implementation features. Compared with virtual RDM, physical RDM provides better performance and more SCSI commands. However, for physical RDM, the entire SCSI disk needs to be mounted to a VM for use. If partitions or logical volumes are used for configuration, the VM cannot identify the disk. - -#### Configuration Example - -VM configuration files need to be modified for RDM. The following is a configuration example. - -- Virtual RDM - - The following is an example of mounting the SCSI disk **/dev/sdc** on the host to the VM as a virtual raw device: - - ``` - - - ... - - - - - -
- - ... - - - ``` - - -- Physical RDM - - The following is an example of mounting the SCSI disk **/dev/sdc** on the host to the VM as a physical raw device: - - ``` - - - ... - - - - - -
- - ... - - - ``` - - -### kworker Isolation and Binding - -#### Overview - -kworker is a per-CPU thread implemented by the Linux kernel. It is used to execute workqueue requests in the system. kworker threads will compete for physical core resources with vCPU threads, resulting in virtualization service performance jitter. To ensure that the VM can run stably and reduce the interference of kworker threads on the VM, you can bind kworker threads on the host to a specific CPU. - -#### Instructions - -You can modify the **/sys/devices/virtual/workqueue/cpumask** file to bind tasks in the workqueue to the CPU specified by **cpumasks**. Masks in **cpumask** are in hexadecimal format. For example, if you need to bind kworker to CPU0 to CPU7, run the following command to change the mask to **ff**: - -``` -# echo ff > /sys/devices/virtual/workqueue/cpumask -``` - -### HugePage Memory - -#### Overview - -Compared with traditional 4 KB memory paging, openEuler also supports 2 MB/1 GB memory paging. HugePage memory can effectively reduce TLB misses and significantly improve the performance of memory-intensive services. openEuler uses two technologies to implement HugePage memory. - -- Static HugePages - - The static HugePage requires that a static HugePage pool be reserved before the host OS is loaded. When creating a VM, you can modify the XML configuration file to specify that the VM memory is allocated from the static HugePage pool. The static HugePage ensures that all memory of a VM exists on the host as the HugePage to ensure physical continuity. However, the deployment difficulty is increased. After the page size of the static HugePage pool is changed, the host needs to be restarted for the change to take effect. The size of a static HugePage can be 2 MB or 1 GB. - - -- THP - - If the transparent HugePage \(THP\) mode is enabled, the VM automatically selects available 2 MB consecutive pages and automatically splits and combines HugePages when allocating memory. 
When no 2 MB consecutive pages are available, the VM selects available 64 KB \(AArch64 architecture\) or 4 KB \(x86\_64 architecture\) pages for allocation. By using THP, users do not need to be aware of it and 2 MB HugePages can be used to improve memory access performance. - - -If VMs use static HugePages, you can disable THP to reduce the overhead of the host OS and ensure stable VM performance. - -#### Instructions - -- Configure static HugePages. - - Before creating a VM, modify the XML file to configure a static HugePage for the VM. - - ``` - - - - - - ``` - - The preceding XML segment indicates that a 1 GB static HugePage is configured for the VM. - - ``` - - - - - - ``` - - The preceding XML segment indicates that a 2 MB static HugePage is configured for the VM. - -- Configure transparent HugePage. - - Dynamically enable the THP through sysfs. - - ``` - # echo always > /sys/kernel/mm/transparent_hugepage/enabled - ``` - - Dynamically disable the THP. - - ``` - # echo never > /sys/kernel/mm/transparent_hugepage/enabled - ``` - - -### PV-qspinlock - -#### Overview - -PV-qspinlock optimizes the spin lock in the virtual scenario of CPU overcommitment. It allows the hypervisor to set the vCPU in the lock context to the block state and wake up the corresponding vCPU after the lock is released. In this way, pCPU resources can be better used in the overcommitment scenario, and the compilation application scenario is optimized to reduce the compilation duration. - -#### Procedure - -Modify the /boot/efi/EFI/openEuler/grub.cfg configuration file of the VM, add arm_pvspin to the startup parameter in the command line, and restart the VM for the modification to take effect. After PV-qspinlock takes effect, run the dmesg command on the VM. 
The following information is displayed: - -``` -[ 0.000000] arm-pv: PV qspinlocks enabled -``` - ->![](./public_sys-resources/icon-note.gif) **Note:** ->PV-qspinlock is supported only when the operating systems of the host machine and VM are both openEuler-20.09 or later and the VM kernel compilation option CONFIG_PARAVIRT_SPINLOCKS is set to y (default value for openEuler). - -### Guest-Idle-Haltpoll - -#### Overview - -To ensure fairness and reduce power consumption, when the vCPU of the VM is idle, the VM executes the WFx/HLT instruction to exit to the host machine and triggers context switchover. The host machine determines whether to schedule other processes or vCPUs on the physical CPU or enter the energy saving mode. However, overheads of switching between a virtual machine and a host machine, additional context switching, and IPI wakeup are relatively high, and this problem is particularly prominent in services where sleep and wakeup are frequently performed. The Guest-Idle-Haltpoll technology indicates that when the vCPU of a VM is idle, the WFx/HLT is not executed immediately and VM-exit occurs. Instead, polling is performed on the VM for a period of time. During this period, the tasks of other vCPUs that share the LLC on the vCPU are woken up without sending IPI interrupts. This reduces the overhead of sending and receiving IPI interrupts and the overhead of VM-exit, thereby reducing the task wakeup latency. - ->![](public_sys-resources/icon-note.gif) **Note:** - The execution of the idle-haltpoll command by the vCPU on the VM increases the CPU overhead of the vCPU on the host machine. Therefore, it is recommended that the vCPU exclusively occupy physical cores on the host machine when this feature is enabled. - -#### Procedure - -The Guest-Idle-Haltpoll feature is disabled by default. The following describes how to enable this feature. - -1. Enable the Guest-Idle-Haltpoll feature. 
- - If the processor architecture of the host machine is x86, you can configure hint-dedicated in the XML file of the VM on the host machine to enable this feature. In this way, the status that the vCPU exclusively occupies the physical core can be transferred to the VM through the VM XML configuration. The host machine ensures the status of the physical core exclusively occupied by the vCPU. - - ``` - - ... - - - ... - - - - ... - - ``` - - Alternatively, set cpuidle\_haltpoll.force to Y in the kernel startup parameters of the VM to forcibly enable the function. This method does not require the host machine to configure the vCPU to exclusively occupy the physical core. - ``` - cpuidle_haltpoll.force=Y - ``` - - - If the processor architecture of the host machine is AArch64, this feature can be enabled only by configuring cpuidle\_haltpoll.force=Y haltpoll.enable=Y in the VM kernel startup parameters. - - ``` - cpuidle_haltpoll.force=Y haltpoll.enable=Y - ``` - -2. Check whether the Guest-Idle-Haltpoll feature takes effect. Run the following command on the VM. If haltpoll is returned, the feature has taken effect. - - ``` - # cat /sys/devices/system/cpu/cpuidle/current_driver - ``` - -3. (Optional) Set the Guest-Idle-Haltpoll parameter. - - The following configuration files are provided in the /sys/module/haltpoll/parameters/ directory of the VM. You can adjust the configuration parameters based on service characteristics. - - - guest\_halt\_poll\_ns: a global parameter that specifies the maximum polling duration after the vCPU is idle. The default value is 200000 (unit: ns). - - guest\_halt\_poll\_shrink: a divisor that is used to shrink the current vCPU guest\_halt\_poll\_ns when the wakeup event occurs after the global guest\_halt\_poll\_ns time. The default value is 2. 
- - guest\_halt\_poll\_grow: a multiplier that is used to extend the current vCPU guest\_halt\_poll\_ns when the wakeup event occurs after the current vCPU guest\_halt\_poll\_ns and before the global guest\_halt\_poll\_ns. The default value is 2. - - guest\_halt\_poll\_grow\_start: When the system is idle, the guest\_halt\_poll\_ns of each vCPU reaches 0. This parameter is used to set the initial value of the current vCPU guest\_halt\_poll\_ns to facilitate scaling in and scaling out of the vCPU polling duration. The default value is 50000 (unit: ns). - - guest\_halt\_poll\_allow\_shrink: a switch that is used to enable vCPU guest\_halt\_poll\_ns scale-in. The default value is Y. (Y indicates enabling the scale-in; N indicates disabling the scale-in.) - - You can run the following command as the user root to change the parameter values: In the preceding command, _value_ indicates the parameter value to be set, and _configFile_ indicates the corresponding configuration file. - - ``` - # echo value > /sys/module/haltpoll/parameters/configFile - ``` - - For example, to set the global guest\_halt\_poll\_ns to 200000 ns, run the following command: - - ``` - # echo 200000 > /sys/module/haltpoll/parameters/guest_halt_poll_ns - ``` - - -## security Best Practices - -### Libvirt Authentication - -#### Overview - -When a user uses libvirt remote invocation but no authentication is performed, any third-party program that connects to the host's network can operate VMs through the libvirt remote invocation mechanism. This poses security risks. To improve system security, openEuler provides the libvirt authentication function. That is, users can remotely invoke a VM through libvirt only after identity authentication. Only specified users can access the VM, thereby protecting VMs on the network. - -#### Enabling Libvirt Authentication - -By default, the libvirt remote invocation function is disabled on openEuler. 
The following describes how to enable the libvirt remote invocation and libvirt authentication functions.
- - ``` - # virsh -c qemu+tcp://192.168.0.1/system - Please enter your authentication name: openeuler - Please enter your password: - Welcome to virsh, the virtualization interactive terminal. - - Type: 'help' for help with commands - 'quit' to quit - - virsh # - ``` - - -#### Managing SASL - -The following describes how to manage SASL users. - -- Query an existing user in the database. - - ``` - # sasldblistusers2 -f /etc/libvirt/passwd.db - user@localhost.localdomain: userPassword - ``` - -- Delete a user from the database. - - ``` - # saslpasswd2 -a libvirt -d user - ``` - - -### qemu-ga - -#### Overview - -QEMU guest agent \(qemu-ga\) is a daemon running within VMs. It allows users on a host OS to perform various management operations on the guest OS through outband channels provided by QEMU. The operations include file operations \(open, read, write, close, seek, and flush\), internal shutdown, VM suspend \(suspend-disk, suspend-ram, and suspend-hybrid\), and obtaining of VM internal information \(including the memory, CPU, NIC, and OS information\). - -In some scenarios with high security requirements, qemu-ga provides the blacklist function to prevent internal information leakage of VMs. You can use a blacklist to selectively shield some functions provided by qemu-ga. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The qemu-ga installation package is **qemu-guest-agent-**_xx_**.rpm**. It is not installed on openEuler by default. _xx_ indicates the actual version number. - -#### Procedure - -To add a qemu-ga blacklist, perform the following steps: - -1. Log in to the VM and ensure that the qemu-guest-agent service exists and is running. - - ``` - # systemctl status qemu-guest-agent |grep Active - Active: active (running) since Wed 2018-03-28 08:17:33 CST; 9h ago - ``` - -2. Query which **qemu-ga** commands can be added to the blacklist: - - ``` - # qemu-ga --blacklist ? 
- guest-sync-delimited - guest-sync - guest-ping - guest-get-time - guest-set-time - guest-info - ... - ``` - - -1. Set the blacklist. Add the commands to be shielded to **--blacklist** in the **/usr/lib/systemd/system/qemu-guest-agent.service** file. Use spaces to separate different commands. For example, to add the **guest-file-open** and **guest-file-close** commands to the blacklist, configure the file by referring to the following: - - ``` - [Service] - ExecStart=-/usr/bin/qemu-ga \ - --blacklist=guest-file-open guest-file-close - ``` - - -1. Restart the qemu-guest-agent service. - - ``` - # systemctl daemon-reload - # systemctl restart qemu-guest-agent - ``` - -2. Check whether the qemu-ga blacklist function takes effect on the VM, that is, whether the **--blacklist** parameter configured for the qemu-ga process is correct. - - ``` - # ps -ef|grep qemu-ga|grep -E "blacklist=|b=" - root 727 1 0 08:17 ? 00:00:00 /usr/bin/qemu-ga --method=virtio-serial --path=/dev/virtio-ports/org.qemu.guest_agent.0 --blacklist=guest-file-open guest-file-close guest-file-read guest-file-write guest-file-seek guest-file-flush -F/etc/qemu-ga/fsfreeze-hook - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >For more information about qemu-ga, visit [https://wiki.qemu.org/Features/GuestAgent](https://wiki.qemu.org/Features/GuestAgent). - - -### sVirt Protection - -#### Overview - -In a virtualization environment that uses the discretionary access control \(DAC\) policy only, malicious VMs running on hosts may attack the hypervisor or other VMs. To improve security in virtualization scenarios, openEuler uses sVirt for protection. sVirt is a security protection technology based on SELinux. It is applicable to KVM virtualization scenarios. A VM is a common process on the host OS. In the hypervisor, the sVirt mechanism labels QEMU processes corresponding to VMs with SELinux labels. 
In addition to types which are used to label virtualization processes and files, different categories are used to label different VMs. Each VM can access only file devices of the same category. This prevents VMs from accessing files and devices on unauthorized hosts or other VMs, thereby preventing VM escape and improving host and VM security. - -#### Enabling sVirt Protection - -1. Enable SELinux on the host. - 1. Log in to the host. - 2. Enable the SELinux function on the host. - 1. Modify the system startup parameter file **grub.cfg** to set **selinux** to **1**. - - ``` - selinux=1 - ``` - - 2. Modify **/etc/selinux/config** to set the **SELINUX** to **enforcing**. - - ``` - SELINUX=enforcing - ``` - - 3. Restart the host. - - ``` - # reboot - ``` - - - -1. Create a VM where the sVirt function is enabled. - 1. Add the following information to the VM configuration file: - - ``` - - ``` - - Or check whether the following configuration exists in the file: - - ``` - - ``` - - 2. Create a VM. - - ``` - # virsh define openEulerVM.xml - ``` - -2. Check whether sVirt is enabled. - - Run the following command to check whether sVirt protection has been enabled for the QEMU process of the running VM. If **svirt\_t:s0:c** exists, sVirt protection has been enabled. - - ``` - # ps -eZ|grep qemu |grep "svirt_t:s0:c" - system_u:system_r:svirt_t:s0:c200,c947 11359 ? 00:03:59 qemu-kvm - system_u:system_r:svirt_t:s0:c427,c670 13790 ? 19:02:07 qemu-kvm - ``` - - -### VM Trusted Boot - -#### Overview - -Trusted boot includes measure boot and remote attestation. The measure boot function is mainly provided by virtualization component. The remote attestation function is enabled by users who install related software (RA client) on VMs and set up the RA server. - -The two basic elements for measure boot are the root of trust (RoT) and chain of trust. The basic idea is to establish a RoT in the computer system. 
The trustworthiness of the RoT is ensured by physical security, technical security, and management security, that is, CRTM (Core Root of Trust for Measurement). A chain of trust is established, starting from the RoT to the BIOS/BootLoader, operating system, and then to the application. The measure boot and trust is performed by one level to the previous level. Finally, the trust is extended to the entire system. The preceding process looks like a chain, so it is called a chain of trust. - -The CRTM is the root of the measure boot and the first component of the system startup. No other code is used to check the integrity of the CRTM. Therefore, as the starting point of the chain of trust, it must be an absolutely trusted source of trust. The CRTM needs to be technically designed as a segment of read-only or strictly restricted code to defend against BIOS attacks and prevent remote injection of malicious code or modification of startup code at the upper layer of the operating system. In a physical host, the CPU microcode is used as the CRTM. In a virtualization environment, the sec part of the vBIOS is generally the CRTM. - -During startup, the previous component measures (calculates the hash value) the next component, and then extends the measurement value to the trusted storage area, for example, the PCR of the TPM. The CRTM measurement BootLoader extends the measurement value to the PCR, and the BootLoader measurement OS extends the measurement value to the PCR. - - - -#### Configuring the vTPM Device to Enable Measurement Startup - -**Installing the swtpm and libtpms Software** - -swtpm provides a TPM emulator (TPM 1.2 and TPM 2.0) that can be integrated into a virtualization environment. So far, it has been integrated into QEMU and serves as a prototype system in RunC. swtpm uses libtpms to provide TPM1.2 and TPM2.0 simulation functions. -Currently, openEuler20.09 provides the libtpms and swtpm sources. You can run the yum command to install them. 
- -``` -# yum install libtpms swtpm swtpm-devel swtpm-tools - -``` - - -**Configuring the vTPM Device for the VM** - -1. Add the following information to the AArch64 VM configuration file: - - ``` - - ... - - ... - - - - ... - - ... - - ``` - - Add the following information to the x86 VM configuration file: - - ``` - - ... - - ... - - - - ... - - ... - - ``` - >![](public_sys-resources/icon-note.gif) **NOTE:** - > Currently, openEuler20.09 AArch64 version does not support ACPI feature in VM trusted boot process. Therefore, do not configure the ACPI feature for VMs. Otherwise, the vTPM device cannot be identified after the VM is started. - -2. Create the VM. - - ``` - # virsh define MeasuredBoot.xml - ``` -3. Start the VM. - - Before starting the VM, run the chmod command to grant the following permission to the /var/lib/swtpm-localca/ directory. Otherwise, the libvirt cannot start the swtpm. - - ``` - # chmod -R 777 /var/lib/swtpm-localca/ - # - # virsh start MeasuredbootVM - ``` - - -**Confirming that the Measure Boot Is Successfully Enabled** - -The vBIOS determines whether to enable the measure boot function. Currently, the vBIOS in openEuler20.09 has the measure boot capability. If the host machine uses the edk2 component of another version, check whether the edk2 component supports the measure boot function. - -Log in to the VM as user root and check whether the TPM driver, tpm2-tss protocol stack, and tpm2-tools are installed on the VM. -By default, the tpm driver (tpm_tis.ko), tpm2-tss protocol stack, and tpm2-tools are installed in openEuler20.09. If another OS is used, run the following command to check whether the driver and related tools are installed: - - -``` -# lsmod |grep tpm -# tpm_tis 16384 0 -# -# yum list installed | grep -E 'tpm2-tss|tpm2-tools' -# -# yum install tpm2-tss tpm2-tools -``` -You can run the tpm2_pcrread (tpm2_pcrlist in tpm2_tools of earlier versions) command to list all PCR values. 
- -``` -# tpm2_pcrread -sha1 : - 0 : fffdcae7cef57d93c5f64d1f9b7f1879275cff55 - 1 : 5387ba1d17bba5fdadb77621376250c2396c5413 - 2 : b2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 - 3 : b2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 - 4 : e5d40ace8bb38eb170c61682eb36a3020226d2c0 - 5 : 367f6ea79688062a6df5f4737ac17b69cd37fd61 - 6 : b2a83b0ebf2f8374299a5b2bdfc31ea955ad7236 - 7 : 518bd167271fbb64589c61e43d8c0165861431d8 - 8 : af65222affd33ff779780c51fa8077485aca46d9 - 9 : 5905ec9fb508b0f30b2abf8787093f16ca608a5a - 10 : 0000000000000000000000000000000000000000 - 11 : 0000000000000000000000000000000000000000 - 12 : 0000000000000000000000000000000000000000 - 13 : 0000000000000000000000000000000000000000 - 14 : 0000000000000000000000000000000000000000 - 15 : 0000000000000000000000000000000000000000 - 16 : 0000000000000000000000000000000000000000 - 17 : ffffffffffffffffffffffffffffffffffffffff - 18 : ffffffffffffffffffffffffffffffffffffffff - 19 : ffffffffffffffffffffffffffffffffffffffff - 20 : ffffffffffffffffffffffffffffffffffffffff - 21 : ffffffffffffffffffffffffffffffffffffffff - 22 : ffffffffffffffffffffffffffffffffffffffff - 23 : 0000000000000000000000000000000000000000 -sha256 : - 0 : d020873038268904688cfe5b8ccf8b8d84c1a2892fc866847355f86f8066ea2d - 1 : 13cebccdb194dd916f2c0c41ec6832dfb15b41a9eb5229d33a25acb5ebc3f016 - 2 : 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 - 3 : 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 - 4 : 07f9074ccd4513ef1cafd7660f9afede422b679fd8ad99d25c0659eba07cc045 - 5 : ba34c80668f84407cd7f498e310cc4ac12ec6ec43ea8c93cebb2a688cf226aff - 6 : 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969 - 7 : 65caf8dd1e0ea7a6347b635d2b379c93b9a1351edc2afc3ecda700e534eb3068 - 8 : f440af381b644231e7322babfd393808e8ebb3a692af57c0b3a5d162a6e2c118 - 9 : 54c08c8ba4706273f53f90085592f7b2e4eaafb8d433295b66b78d9754145cfc - 10 : 0000000000000000000000000000000000000000000000000000000000000000 - 11 : 
0000000000000000000000000000000000000000000000000000000000000000 - 12 : 0000000000000000000000000000000000000000000000000000000000000000 - 13 : 0000000000000000000000000000000000000000000000000000000000000000 - 14 : 0000000000000000000000000000000000000000000000000000000000000000 - 15 : 0000000000000000000000000000000000000000000000000000000000000000 - 16 : 0000000000000000000000000000000000000000000000000000000000000000 - 17 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 18 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 19 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 20 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 21 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 22 : ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - 23 : 0000000000000000000000000000000000000000000000000000000000000000 -``` diff --git a/docs/en/docs/Virtualization/environment-preparation.md b/docs/en/docs/Virtualization/environment-preparation.md deleted file mode 100644 index 38f159cecac279ff014175321f2bfccdced114bd..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/environment-preparation.md +++ /dev/null @@ -1,330 +0,0 @@ -# Environment Preparation - -- [Environment Preparation](#environment-preparation) - - [Preparing a VM Image](#preparing-a-vm-image) - - [Preparing the VM Network](#preparing-the-vm-network) - - [Preparing Boot Firmware](#preparing-boot-firmware) - -## Preparing a VM Image - -### Overview - -A VM image is a file that contains a virtual disk that has been installed and can be used to start the OS. VM images are in different formats, such as raw and qcow2. Compared with the raw format, the qcow2 format occupies less space and supports features such as snapshot, copy-on-write, AES encryption, and zlib compression. However, the performance of the qcow2 format is slightly lower than that of the raw format. 
The qemu-img tool is used to create image files. This section uses the qcow2 image file as an example to describe how to create a VM image. - -### Creating an Image - -To create a qcow2 image file, perform the following steps: - -1. Install the **qemu-img** software package. - - ``` - # yum install -y qemu-img - ``` - -2. Run the **create** command of the qemu-img tool to create an image file. The command format is as follows: - - ``` - $ qemu-img create -f -o - ``` - - The parameters are described as follows: - - - _imgFormat_: Image format. The value can be **raw** or **qcow2**. - - _fileOption_: File option, which is used to set features of an image file, such as specifying a backend image file, compression, and encryption. - - _fileName_: File name. - - _diskSize_: Disk size, which specifies the size of a block disk. The unit can be K, M, G, or T, indicating KiB, MiB, GiB, or TiB. - - For example, to create an image file openEuler-imge.qcow2 whose disk size is 4 GB and format is qcow2, the command and output are as follows: - - ``` - $ qemu-img create -f qcow2 openEuler-image.qcow2 4G - Formatting 'openEuler-image.qcow2', fmt=qcow2 size=4294967296 cluster_size=65536 lazy_refcounts=off refcount_bits=16 - ``` - - -### Changing the Image Disk Space - -If a VM requires larger disk space, you can use the qemu-img tool to change the disk space of the VM image. The method is as follows: - -1. Run the following command to query the disk space of the VM image: - - ``` - # qemu-img info - ``` - - For example, if the command and output for querying the disk space of the openEuler-image.qcow2 image are as follows, the disk space of the image is 4 GiB. - - ``` - # qemu-img info openEuler-image.qcow2 - image: openEuler-image.qcow2 - file format: qcow2 - virtual size: 4.0G (4294967296 bytes) - disk size: 196K - cluster_size: 65536 - Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false - ``` - -2. 
Run the following command to change the image disk space. In the command, _imgFiLeName_ indicates the image name, and **+** and **-** indicate the image disk space to be increased and decreased, respectively. The unit is KB, MB, GB, and T, indicating KiB, MiB, GiB, and TiB, respectively. - - ``` - # qemu-img resize [+|-] - ``` - - For example, to expand the disk space of the openEuler-image.qcow2 image to 24 GiB, that is, to add 20 GiB to the original 4 GiB, the command and output are as follows: - - ``` - # qemu-img resize openEuler-image.qcow2 +20G - Image resized. - ``` - -3. Run the following command to check whether the image disk space is changed successfully: - - ``` - # qemu-img info - ``` - - For example, if the openEuler-image.qcow2 image disk space has been expanded to 24 GiB, the command and output are as follows: - - ``` - # qemu-img info openEuler-image.qcow2 - image: openEuler-image.qcow2 - file format: qcow2 - virtual size: 24G (25769803776 bytes) - disk size: 200K - cluster_size: 65536 - Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false - ``` - - - -## Preparing the VM Network - -### Overview - -To enable the VM to communicate with external networks, you need to configure the network environment for the VM. KVM virtualization supports multiple types of bridges, such as Linux bridge and Open vSwitch bridge. As shown in [Figure 1](#fig1785384714917), the data transmission path is **VM \> virtual NIC device \> Linux bridge or Open vSwitch bridge \> physical NIC**. In addition to configuring virtual NICs \(vNICs\) for VMs, creating a bridge for a host is the key to connecting to a virtualized network. - -This section describes how to set up a Linux bridge and an Open vSwitch bridge to connect a VM to the network. You can select a bridge type based on the site requirements. 
- -**Figure 1** Virtual network structure -![](./figures/virtual-network-structure.png "virtual-network-structure") - -### Setting Up a Linux Bridge - -The following describes how to bind the physical NIC eth0 to the Linux bridge br0. - -1. Install the **bridge-utils** software package. - - The Linux bridge is managed by the brctl tool. The corresponding installation package is bridge-utils. The installation command is as follows: - - ``` - # yum install -y bridge-utils - ``` - -2. Create bridge br0. - - ``` - # brctl addbr br0 - ``` - -3. Bind the physical NIC eth0 to the Linux bridge. - - ``` - # brctl addif br0 eth0 - ``` - -4. After eth0 is connected to the bridge, the IP address of eth0 is set to 0.0.0.0. - - ``` - # ifconfig eth0 0.0.0.0 - ``` - -5. Set the IP address of br0. - - If a DHCP server is available, set a dynamic IP address through the dhclient. - - ``` - # dhclient br0 - ``` - - - If no DHCP server is available, configure a static IP address for br0. For example, set the static IP address to 192.168.1.2 and subnet mask to 255.255.255.0. - - ``` - # ifconfig br0 192.168.1.2 netmask 255.255.255.0 - ``` - - - -### Setting Up an Open vSwitch Bridge - -The Open vSwitch bridge provides more convenient automatic orchestration capabilities. This section describes how to install network virtualization components to set up an Open vSwitch bridge. - -**1. Install the Open vSwitch component.** - -If the Open vSwitch is used to provide virtual network, you need to install the Open vSwitch network virtualization component. - -1. Install the Open vSwitch component. - - ``` - # yum install -y openvswitch - ``` - -2. Start the Open vSwitch service. - - ``` - # systemctl start openvswitch - ``` - - -**2. Check whether the installation is successful.** - -Check whether the Open vSwitch components. - -1. Check whether the openvswitch component is successfully installed. If the installation is successful, the software package information is displayed. 
The command and output are as follows: - - ``` - # rpm -qi openvswitch - Name : openvswitch - Version : 2.11.1 - Release : 1 - Architecture: aarch64 - Install Date: Thu 15 Aug 2019 05:08:35 PM CST - Group : System Environment/Daemons - Size : 6051185 - License : ASL 2.0 - Signature : (none) - Source RPM : openvswitch-2.11.1-1.src.rpm - Build Date : Thu 08 Aug 2019 05:24:46 PM CST - Build Host : armbuild10b247b121b105 - Relocations : (not relocatable) - Vendor : Nicira, Inc. - URL : http://www.openvswitch.org/ - Summary : Open vSwitch daemon/database/utilities - Description : - Open vSwitch provides standard network bridging functions and - support for the OpenFlow protocol for remote per-flow control of - traffic. - ``` - -2. Check whether the Open vSwitch service is started successfully. If the service is in the **Active** state, the service is started successfully. You can use the command line tool provided by the Open vSwitch. The command and output are as follows: - - ``` - # systemctl status openvswitch - ● openvswitch.service - LSB: Open vSwitch switch - Loaded: loaded (/etc/rc.d/init.d/openvswitch; generated) - Active: active (running) since Sat 2019-08-17 09:47:14 CST; 4min 39s ago - Docs: man:systemd-sysv-generator(8) - Process: 54554 ExecStart=/etc/rc.d/init.d/openvswitch start (code=exited, status=0/SUCCESS) - Tasks: 4 (limit: 9830) - Memory: 22.0M - CGroup: /system.slice/openvswitch.service - ├─54580 ovsdb-server: monitoring pid 54581 (healthy) - ├─54581 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate> - ├─54602 ovs-vswitchd: monitoring pid 54603 (healthy) - └─54603 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/open> - ``` - - -**3. 
Set up an Open vSwitch bridge** - -The following describes how to set up an Open vSwitch layer-1 bridge br0. - -1. Create the Open vSwitch bridge br0. - - ``` - # ovs-vsctl add-br br0 - ``` - -2. Add the physical NIC eth0 to br0. - - ``` - # ovs-vsctl add-port br0 eth0 - ``` - -3. After eth0 is connected to the bridge, the IP address of eth0 is set to 0.0.0.0. - - ``` - # ifconfig eth0 0.0.0.0 - ``` - -4. Assign an IP address to OVS bridge br0. - - If a DHCP server is available, set a dynamic IP address through the dhclient. - - ``` - # dhclient br0 - ``` - - - If no DHCP server is available, configure a static IP address for br0, for example, 192.168.1.2. - - ``` - # ifconfig br0 192.168.1.2 - ``` - - - -## Preparing Boot Firmware - -### Overview - -The boot mode varies depending on the architecture. x86 servers support the Unified Extensible Firmware Interface \(UFEI\) and BIOS boot modes, and AArch64 servers support only the UFEI boot mode. By default, boot files corresponding to the BIOS mode have been installed on openEuler. No additional operations are required. This section describes how to install boot files corresponding to the UEFI mode. - -The Unified Extensible Firmware Interface \(UEFI\) is a new interface standard used for power-on auto check and OS boot. It is an alternative to the traditional BIOS. EDK II is a set of open source code that implements the UEFI standard. In virtualization scenarios, the EDK II tool set is used to start a VM in UEFI mode. Before using the EDK II tool, you need to install the corresponding software package before starting a VM. This section describes how to install the EDK II tool. - -### Installation Methods - -If the UEFI mode is used, the tool set EDK II needs to be installed. The installation package for the AArch64 architecture is **edk2-aarch64**, and that for the x86 architecture is **edk2-ovmf**. This section uses the AArch64 architecture as an example to describe the installation method. 
For the x86 architecture, you only need to replace **edk2-aarch64** with **edk2-ovmf**. - -1. Run the following command to install the **edk** software package: - - In the AArch64 architecture, the **edk2** package name is **edk2-aarch64**. - - ``` - # yum install -y edk2-aarch64 - ``` - - In the x86\_64 architecture, the **edk2** package name is **edk2-ovmf**. - - ``` - # yum install -y edk2-ovmf - ``` - -2. Run the following command to check whether the **edk** software package is successfully installed: - - In the AArch64 architecture, the command is as follows: - - ``` - # rpm -qi edk2-aarch64 - ``` - - If information similar to the following is displayed, the **edk** software package is successfully installed: - - ``` - Name : edk2-aarch64 - Version : 20180815gitcb5f4f45ce - Release : 1.oe3 - Architecture: noarch - Install Date: Mon 22 Jul 2019 04:52:33 PM CST - Group : Applications/Emulators - ``` - - In the x86\_64 architecture, the command is as follows: - - ``` - # rpm -qi edk2-ovmf - ``` - - If information similar to the following is displayed, the **edk** software package is successfully installed: - - ``` - Name : edk2-ovmf - Version : 201908 - Release : 6.oe1 - Architecture: noarch - Install Date: Thu 19 Mar 2020 09:09:06 AM CST - ``` diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP1.png b/docs/en/docs/Virtualization/figures/CertEnrollP1.png deleted file mode 100644 index 536e0618a3ab5b70937292205242a08237e34712..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP1.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP2.png b/docs/en/docs/Virtualization/figures/CertEnrollP2.png deleted file mode 100644 index 0557c8782960188dbe9d84a1d0e66c9b45d2b303..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP2.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP3.png 
b/docs/en/docs/Virtualization/figures/CertEnrollP3.png deleted file mode 100644 index 326fcf1e8d5e3c795ebcde286d8e0fef14bec7d1..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP3.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP4.png b/docs/en/docs/Virtualization/figures/CertEnrollP4.png deleted file mode 100644 index bc77c038e1e3a5ec30d7ba4f805ca937792e9327..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP4.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP5.png b/docs/en/docs/Virtualization/figures/CertEnrollP5.png deleted file mode 100644 index 0f22b3cbd84f7c93f74898a926bc3e32f231667f..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP5.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP6.png b/docs/en/docs/Virtualization/figures/CertEnrollP6.png deleted file mode 100644 index 08235013ca71f1ec51e9af2f143629d1a6132fe9..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP6.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP7.png b/docs/en/docs/Virtualization/figures/CertEnrollP7.png deleted file mode 100644 index f934521d59dd4a75449fcb2ca8abc54045b9102b..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP7.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/CertEnrollP8.png b/docs/en/docs/Virtualization/figures/CertEnrollP8.png deleted file mode 100644 index 9a8158e3378bf25dee05b892cc60f424542455d7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/CertEnrollP8.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/OSBootFlow.png b/docs/en/docs/Virtualization/figures/OSBootFlow.png 
deleted file mode 100644 index f496c5675c72359e5160384c766a11399b04bfa6..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/OSBootFlow.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/SecureBootFlow.png b/docs/en/docs/Virtualization/figures/SecureBootFlow.png deleted file mode 100644 index d639975800752c6eca6765a416c256a4752fb590..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/SecureBootFlow.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/en-us_image_0218587435.png b/docs/en/docs/Virtualization/figures/en-us_image_0218587435.png deleted file mode 100644 index a6107f2308d194c92ebe75b58e9125819e7fe9eb..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/en-us_image_0218587435.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/en-us_image_0218587436.png b/docs/en/docs/Virtualization/figures/en-us_image_0218587436.png deleted file mode 100644 index 28a8d25b19c5a5ed043a8f4701b8f920de365ea2..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/en-us_image_0218587436.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/kvm-architecture.png b/docs/en/docs/Virtualization/figures/kvm-architecture.png deleted file mode 100644 index 74cc91f2944b4ed5404edf036b1d71cd84df7e29..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/kvm-architecture.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/status-transition-diagram.png b/docs/en/docs/Virtualization/figures/status-transition-diagram.png deleted file mode 100644 index acbf4df149f57f43fbbcbb746980bd4748fb71d9..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/status-transition-diagram.png and /dev/null differ diff --git 
a/docs/en/docs/Virtualization/figures/virtual-network-structure.png b/docs/en/docs/Virtualization/figures/virtual-network-structure.png deleted file mode 100644 index 8a884a9212f8e35f545cfa01dc7320d145844390..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/virtual-network-structure.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/figures/virtualized-architecture.png b/docs/en/docs/Virtualization/figures/virtualized-architecture.png deleted file mode 100644 index 2e8b01628fb51bb6cc1162d6158259192506bc3a..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/figures/virtualized-architecture.png and /dev/null differ diff --git a/docs/en/docs/Virtualization/installation-to-virtualization.md b/docs/en/docs/Virtualization/installation-to-virtualization.md deleted file mode 100644 index 6e8d1e059a89eef837d3cb748532e9233c0139aa..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/installation-to-virtualization.md +++ /dev/null @@ -1,147 +0,0 @@ -# Installation Guide - -This chapter describes how to install virtualization components in openEuler. - -- [Installation Guide](#installation-guide) - - [Minimum Hardware Requirements](#minimum-hardware-requirements) - - [Installing Core Virtualization Components](#installing-core-virtualization-components) - - [Installation Methods](#installation-methods) - - [Verifying the Installation](#verifying-the-installation) - - -## Minimum Hardware Requirements - -The minimum hardware requirements for installing virtualization components on openEuler are as follows: - -- AArch64 processor architecture: ARMv8 or later, supporting virtualization expansion -- x86\_64 processor architecture, supporting VT-x -- 2-core CPU -- 4 GB memory -- 16 GB available disk space - -## Installing Core Virtualization Components - -### Installation Methods - -#### Prerequisites - -- The yum source has been configured. 
For details, see _openEuler Administrator Guide_. -- Only the administrator has permission to perform the installation. - -#### Procedure - -1. Install the QEMU component. - - ``` - # yum install -y qemu - ``` - -2. Install the libvirt component. - - ``` - # yum install -y libvirt - ``` - -3. Start the libvirtd service. - - ``` - # systemctl start libvirtd - ``` - - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The KVM module is integrated in the openEuler kernel and does not need to be installed separately. - -### Verifying the Installation - -1. Check whether the kernel supports KVM virtualization, that is, check whether the **/dev/kvm** and **/sys/module/kvm** files exist. The command and output are as follows: - - ``` - # ls /dev/kvm - /dev/kvm - ``` - - ``` - # ls /sys/module/kvm - parameters uevent - ``` - - If the preceding files exist, the kernel supports KVM virtualization. If the preceding files do not exist, KVM virtualization is not enabled during kernel compilation. In this case, you need to use the Linux kernel that supports KVM virtualization. - -2. Check whether QEMU is successfully installed. If the installation is successful, the QEMU software package information is displayed. The command and output are as follows: - - ``` - # rpm -qi qemu - Name : qemu - Epoch : 2 - Version : 4.0.1 - Release : 10 - Architecture: aarch64 - Install Date: Wed 24 Jul 2019 04:04:47 PM CST - Group : Unspecified - Size : 16869484 - License : GPLv2 and BSD and MIT and CC-BY - Signature : (none) - Source RPM : qemu-4.0.0-1.src.rpm - Build Date : Wed 24 Jul 2019 04:03:52 PM CST - Build Host : localhost - Relocations : (not relocatable) - URL : http://www.qemu.org - Summary : QEMU is a generic and open source machine emulator and virtualizer - Description : - QEMU is a generic and open source processor emulator which achieves a good - emulation speed by using dynamic translation. QEMU has two operating modes: - - * Full system emulation. 
In this mode, QEMU emulates a full system (for - example a PC), including a processor and various peripherials. It can be - used to launch different Operating Systems without rebooting the PC or - to debug system code. - * User mode emulation. In this mode, QEMU can launch Linux processes compiled - for one CPU on another CPU. - - As QEMU requires no host kernel patches to run, it is safe and easy to use. - ``` - -3. Check whether libvirt is successfully installed. If the installation is successful, the libvirt software package information is displayed. The command and output are as follows: - - ``` - # rpm -qi libvirt - Name : libvirt - Version : 5.5.0 - Release : 1 - Architecture: aarch64 - Install Date: Tue 30 Jul 2019 04:56:21 PM CST - Group : Unspecified - Size : 0 - License : LGPLv2+ - Signature : (none) - Source RPM : libvirt-5.5.0-1.src.rpm - Build Date : Mon 29 Jul 2019 08:14:57 PM CST - Build Host : 71e8c1ce149f - Relocations : (not relocatable) - URL : https://libvirt.org/ - Summary : Library providing a simple virtualization API - Description : - Libvirt is a C toolkit to interact with the virtualization capabilities - of recent versions of Linux (and other OSes). The main package includes - the libvirtd server exporting the virtualization support. - ``` - -4. Check whether the libvirt service is started successfully. If the service is in the **Active** state, the service is started successfully. You can use the virsh command line tool provided by the libvirt. 
The command and output are as follows: - - ``` - # systemctl status libvirtd - ● libvirtd.service - Virtualization daemon - Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled) - Active: active (running) since Tue 2019-08-06 09:36:01 CST; 5h 12min ago - Docs: man:libvirtd(8) - https://libvirt.org - Main PID: 40754 (libvirtd) - Tasks: 20 (limit: 32768) - Memory: 198.6M - CGroup: /system.slice/libvirtd.service - ─40754 /usr/sbin/libvirtd - - ``` - - diff --git a/docs/en/docs/Virtualization/introduction-to-virtualization.md b/docs/en/docs/Virtualization/introduction-to-virtualization.md deleted file mode 100644 index 0abe43ba5f4a5d869f2c8a5c68cbac325d9a4f83..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/introduction-to-virtualization.md +++ /dev/null @@ -1,90 +0,0 @@ -# Introduction to Virtualization - -## Overview - -In computer technologies, virtualization is a resource management technology. It abstracts various physical resources \(such as processors, memory, disks, and network adapters\) of a computer, converts the resources, and presents the resources for segmentation and combination into one or more computer configuration environments. This resource management technology breaks the inseparable barrier of the physical structure, and makes these resources not restricted by the architecture, geographical or physical configuration of the existing resources after virtualization. In this way, users can better leverage the computer hardware resources and maximize the resource utilization. - -Virtualization enables multiple virtual machines \(VMs\) to run on a physical server. The VMs share the processor, memory, and I/O resources of the physical server, but are logically isolated from each other. In the virtualization technology, the physical server is called a host machine, the VM running on the host machine is called a guest, and the operating system \(OS\) running on the VM is called a guest OS. 
A layer of software, called the virtualization layer, exists between a host machine and a VM to simulate virtual hardware. This virtualization layer is called a VM monitor, as shown in the following figure. - -**Figure 1** Virtualized architecture -![](./figures/virtualized-architecture.png "virtualized-architecture") - -## Virtualized Architecture - -Currently, mainstream virtualization technologies are classified into two types based on the implementation structure of the Virtual Machine Monitor \(VMM\): - -- Hypervisor model - - In this model, VMM is considered as a complete operating system \(OS\) and has the virtualization function. VMM directly manages all physical resources, including processors, memory, and I/O devices. - -- Host model - - In this model, physical resources are managed by a host OS, which is a traditional OS, such as Linux and Windows. The host OS does not provide the virtualization capability. The VMM that provides the virtualization capability runs on the host OS as a driver or software of the system. The VMM invokes the host OS service to obtain resources and simulate the processor, memory, and I/O devices. The virtualization implementation of this model includes KVM and Virtual Box. - - -Kernel-based Virtual Machine \(KVM\) is a kernel module of Linux. It makes Linux a hypervisor. [Figure 2](#fig310953013541) shows the KVM architecture. KVM does not simulate any hardware device. It is used to enable virtualization capabilities provided by the hardware, such as Intel VT-x, AMD-V, Arm virtualization extensions. The user-mode QEMU simulates the mainboard, memory, and I/O devices. The user-mode QEMU works with the kernel KVM module to simulate VM hardware. The guest OS runs on the hardware simulated by the QEMU and KVM. 
- -**Figure 2** KVM architecture -![](./figures/kvm-architecture.png "kvm-architecture") - -## Virtualization Components - -Virtualization components provided in the openEuler software package: - -- KVM: provides the core virtualization infrastructure to make the Linux system a hypervisor. Multiple VMs can run on the same host at the same time. -- QEMU: simulates a processor and provides a set of device models to work with KVM to implement hardware-based virtualization simulation acceleration. -- Libvirt: provides a tool set for managing VMs, including unified, stable, and open application programming interfaces \(APIs\), daemon process \(libvirtd\), and default command line management tool \(virsh\). -- Open vSwitch: provides a virtual network tool set for VMs, supports programming extension and standard management interfaces and protocols \(such as NetFlow, sFlow, IPFIX, RSPAN, CLI, LACP, and 802.1ag\). - -## Virtualization Characteristics - -Virtualization has the following characteristics: - -- Partition - - Virtualization can logically divide software on a physical server to run multiple VMs \(virtual servers\) with different specifications. - - -- Isolation - - Virtualization can simulate virtual hardware and provide hardware conditions for VMs to run complete OSs. The OSs of each VM are independent and isolated from each other. For example, if the OS of a VM breaks down due to a fault or malicious damage, the OSs and applications of other VMs are not affected. - - -- Encapsulation - - Encapsulation is performed on a per VM basis. The excellent encapsulation capability makes VMs more flexible than physical machines. Functions such as live migration, snapshot, and cloning of VMs can be realized, implementing quick deployment and automatic O&M of data centers. - - -- Hardware-irrelevant - - After being abstracted by the virtualization layer, VMs are not directly bound to underlying hardware and can run on other servers without being modified. 
- - -## Virtualization Advantages - -Virtualization brings the following benefits to infrastructure of the data center: - -- Flexibility and scalability - - Users can dynamically allocate and reclaim resources based to meet dynamic service requirements. In addition, users can plan different VM specifications based on product requirements and adjust the scale without changing the physical resource configuration. - - -- Higher availability and better O&M methods - - Virtualization provides O&M methods such as live migration, snapshot, live upgrade, and automatic DR. Physical resources can be deleted, upgraded, or changed without affecting users, improving service continuity and implementing automatic O&M. - - -- Security hardening - - Virtualization provides OS-level isolation and hardware-based processor operation privilege-level control. Compared with simple sharing mechanisms, virtualization provides higher security and implements controllable and secure access to data and services. - - -- High resource utilization - - Virtualization supports dynamic sharing of physical resources and resource pools, improving resource utilization. - - -## openEuler Virtualization - -openEuler provides KVM virtualization components that support the AArch64 and x86\_64 processor architectures. 
- diff --git a/docs/en/docs/Virtualization/managing-devices.md b/docs/en/docs/Virtualization/managing-devices.md deleted file mode 100644 index d9044dd93cf124863e74f723dfdc279744a3f6ea..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/managing-devices.md +++ /dev/null @@ -1,670 +0,0 @@ -# Managing Devices - -- [Managing Devices](#managing-devices) - - [Configuring a PCIe Controller for a VM](#configuring-a-pcie-controller-for-a-vm) - - [Managing Virtual Disks](#managing-virtual-disks) - - [Managing vNICs](#managing-vnics) - - [Configuring a Virtual Serial Port](#configuring-a-virtual-serial-port) - - [Managing Device Passthrough](#managing-device-passthrough) - - [PCI Passthrough](#pci-passthrough) - - [SR-IOV Passthrough](#sr-iov-passthrough) - - [Managing VM USB](#managing-vm-usb) - - [Configuring USB Controllers](#configuring-usb-controllers) - - [Configuring a USB Passthrough Device](#configuring-a-usb-passthrough-device) - - [Storing Snapshots](#storing-snapshots) - - -## Configuring a PCIe Controller for a VM - -### Overview - -Thr NIC, disk controller, and PCIe pass-through devices in a VM must be mounted to a PCIe root port. Each root port corresponds to a PCIe slot. The devices mounted to the root port support hot swap, but the root port does not support hot swap. Therefore, users need to consider the hot swap requirements and plan the maximum number of PCIe root ports reserved for the VM. Before the VM is started, the root port is statically configured. - -### Configuring the PCIe Root, PCIe Root Port, and PCIe-PCI-Bridge - -The VM PCIe controller is configured using the XML file. The **model** corresponding to PCIe root, PCIe root port, and PCIe-PCI-bridge in the XML file are **pcie-root**, **pcie-root-port**, and **pcie-to-pci-bridge**, respectively. - -- Simplified configuration method - - Add the following contents to the XML file of the VM. Other attributes of the controller are automatically filled by libvirt. 
- - ``` - - - - - - - ``` - - The **pcie-root** and **pcie-to-pci-bridge** occupy one **index** respectively. Therefore, the final **index** is the number of required **root ports + 1**. - -- Complete configuration method - - Add the following contents to the XML file of the VM: - - ``` - - - - -
- - - -
- - - - -
- - - ``` - - In the preceding contents: - - - The **chassis** and **port** attributes of the root port must be in ascending order. Because a PCIe-PCI-bridge is inserted in the middle, the **chassis** number skips **2**, but the **port** numbers are still consecutive. - - The **address function** of the root port ranges from **0\*0** to **0\*7**. - - A maximum of eight functions can be mounted to each slot. When the slot is full, the slot number increases. - - The complete configuration method is complex. Therefore, the simplified one is recommended. - - -## Managing Virtual Disks - -### Overview - -Virtual disk types include virtio-blk, virtio-scsi, and vhost-scsi. virtio-blk simulates a block device, and virtio-scsi and vhost-scsi simulate SCSI devices. - -- virtio-blk: It can be used for common system disk and data disk. In this configuration, the virtual disk is presented as **vd\[a-z\]** or **vd\[a-z\]\[a-z\]** in the VM. -- virtio-scsi: It is recommended for common system disk and data disk. In this configuration, the virtual disk is presented as **sd\[a-z\]** or **sd\[a-z\]\[a-z\]** in the VM. -- vhost-scsi: It is recommended for the virtual disk that has high performance requirements. In this configuration, the virtual disk is presented as **sd\[a-z\]** or **sd\[a-z\]\[a-z\]** on the VM. - -### Procedure - -For details about how to configure a virtual disk, see [3.2.4.1 Storage Devices](#storage-devices). This section uses the virtio-scsi disk as an example to describe how to attach and detach a virtual disk. - -- Attach a virtio-scsi disk. - - Run the **virsh attach-device** command to attach the virtio-scsi virtual disk. - - ``` - # virsh attach-device - ``` - - The preceding command can be used to attach a disk to a VM online. The disk information is specified in the **attach-device.xml** file. The following is an example of the **attach-device.xml** file: - - ``` - ### attach-device.xml ### - - - - - -
- - ``` - - The disk attached by running the preceding commands becomes invalid after the VM is shut down and restarted. If you need to permanently attach a virtual disk to a VM, run the **virsh attach-device** command with the **--config** parameter. - -- Detach a virtio-scsi disk. - - If a disk attached online is no longer used, run the **virsh detach** command to dynamically detach it. - - ``` - # virsh detach-device - ``` - - **detach-device.xml** specifies the XML information of the disk to be detached, which must be the same as the XML information during dynamic attachment. - - -## Managing vNICs - -### Overview - -The vNIC types include virtio-net, vhost-net, and vhost-user. After creating a VM, you may need to attach or detach a vNIC. openEuler supports NIC hot swap, which can change the network throughput and improve system flexibility and scalability. - -### Procedure - -For details about how to configure a virtual NIC, see [3.2.4.2 Network Devices](#network-device). This section uses the vhost-net NIC as an example to describe how to attach and detach a vNIC. - -- Attach the vhost-net NIC. - - Run the **virsh attach-device** command to attach the vhost-net vNIC. - - ``` - # virsh attach-device - ``` - - The preceding command can be used to attach a vhost-net NIC to a running VM. The NIC information is specified in the **attach-device.xml** file. The following is an example of the **attach-device.xml** file: - - ``` - ### attach-device.xml ### - - - - - - - - ``` - - The vhost-net NIC attached using the preceding commands becomes invalid after the VM is shut down and restarted. If you need to permanently attach a vNIC to a VM, run the **virsh attach-device** command with the **--config** parameter. - -- Detach the vhost-net NIC. - - If a NIC attached online is no longer used, run the **virsh detach** command to dynamically detach it. 
- - ``` - # virsh detach-device - ``` - - **detach-device.xml** specifies the XML information of the vNIC to be detached, which must be the same as the XML information during dynamic attachment. - - -## Configuring a Virtual Serial Port - -### Overview - -In a virtualization environment, VMs and host machines need to communicate with each other to meet management and service requirements. However, in the complex network architecture of the cloud management system, services running on the management plane and VMs running on the service plane cannot communicate with each other at layer 3. As a result, service deployment and information collection are not fast enough. Therefore, a virtual serial port is required for communication between VMs and host machines. You can add serial port configuration items to the XML configuration file of a VM to implement communication between VMs and host machines. - -### Procedure - -The Linux VM serial port console is a pseudo terminal device connected to the host machine through the serial port of the VM. It implements interactive operations on the VM through the host machine. In this scenario, the serial port needs to be configured in the pty type. This section describes how to configure a pty serial port. - -- Add the following virtual serial port configuration items under the **devices** node in the XML configuration file of the VM: - - ``` - - - - - - ``` - -- Run the **virsh console** command to connect to the pty serial port of the running VM. - - ``` - # virsh console - ``` - -- To ensure that no serial port message is missed, use the **--console** option to connect to the serial port when starting the VM. - - ``` - # virsh start --console - ``` - - -## Managing Device Passthrough - -The device passthrough technology enables VMs to directly access physical devices. The I/O performance of VMs can be improved in this way. - -Currently, the VFIO passthrough is used. 
It can be classified into PCI passthrough and SR-IOV passthrough based on device type. - -### PCI Passthrough - -PCI passthrough directly assigns a physical PCI device on the host to a VM. The VM can directly access the device. PCI passthrough uses the VFIO device passthrough mode. The PCI passthrough configuration file in XML format for a VM is as follows: - -``` - - - -
- - -
- -``` - -**Table 1** Device configuration items for PCI passthrough - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value

-

hostdev.source.address.domain

-

Domain ID of the PCI device on the host OS.

-

≥ 0

-

hostdev.source.address.bus

-

Bus ID of the PCI device on the host OS.

-

≥ 1

-

hostdev.source.address.slot

-

Device ID of the PCI device on the host OS.

-

≥ 0

-

hostdev.source.address.function

-

Function ID of the PCI device on the host OS.

-

≥ 0

-

hostdev.driver.name

-

Backend driver of PCI passthrough. This parameter is optional.

-

vfio (default value)

-

hostdev.rom

-

Specifies whether the VM can access the ROM of the passthrough device.

-

This parameter can be set to on or off. The default value is on.

-
  • on: indicates that the VM can access the ROM of the passthrough device. For example, if a VM with a passthrough NIC needs to boot from the preboot execution environment (PXE), or a VM with a passthrough Host Bus Adapter (HBA) card needs to boot from the ROM, you can set this parameter to on.
  • off: indicates that the VM cannot access the ROM of the passthrough device.
-

hostdev.address type

-

Bus, Device, and Function (BDF) IDs on the guest OS displayed on the PCI device.

-

[0x03–0x1e] (range of slot ID)

-

Note:

-
  • domain indicates the domain information, bus indicates the bus ID, slot indicates the slot ID, and function indicates the function.
  • Except for slot, default values of these parameters are 0.
  • The first slot 0x00 is occupied by the system, the second slot 0x01 is occupied by the IDE controller and USB controller, and the third slot 0x02 is occupied by the video.
  • The last slot 0x1f is occupied by the PV channel.
-
- ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->VFIO passthrough is implemented by IOMMU group. Devices are divided to IOMMU groups based on access control services \(ACS\) on hardware. Devices in the same IOMMU group can be assigned to only one VM. If multiple functions on a PCI device belong to the same IOMMU group, they can be directly assigned to only one VM as well. - -### SR-IOV Passthrough - -#### Overview - -Single Root I/O Virtualization \(SR-IOV\) is a hardware-based virtualization solution. With the SR-IOV technology, a physical function \(PF\) can provide multiple virtual functions \(VFs\), and each VF can be directly assigned to a VM. This greatly improves hardware resource utilization and I/O performance of VMs. A typical application scenario is SR-IOV passthrough for NICs. With the SR-IOV technology, a physical NIC \(PF\) can function as multiple VF NICs, and then the VFs can be directly assigned to VMs. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- SR-IOV requires the support of physical hardware. Before using SR-IOV, ensure that the hardware device to be directly assigned supports SR-IOV and the device driver on the host OS works in SR-IOV mode. ->- The following describes how to query the NIC model: ->In the following command output, values in the first column indicate the PCI numbers of NICs, and **19e5:1822** indicates the vendor ID and device ID of the NIC. 
- ->``` -># lspci | grep Ether ->05:00.0 Ethernet controller: Device 19e5:1822 (rev 45) ->07:00.0 Ethernet controller: Device 19e5:1822 (rev 45) ->09:00.0 Ethernet controller: Device 19e5:1822 (rev 45) ->0b:00.0 Ethernet controller: Device 19e5:1822 (rev 45) ->81:00.0 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01) ->81:00.1 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01) - ->``` - - -#### Procedure - - -To configure SR-IOV passthrough for a NIC, perform the following steps: - -1. Enable the SR-IOV mode for the NIC. - 1. Ensure that VF driver support provided by the NIC supplier exists on the guest OS. Otherwise, VFs in the guest OS cannot work properly. - 2. Enable the SMMU/IOMMU support in the BIOS of the host OS. The enabling method varies depending on the servers of different vendors. For details, see the help documents of the servers. - 3. Configure the host driver to enable the SR-IOV VF mode. The following uses the Hi1822 NIC as an example to describe how to enable 16 VFs. - - ``` - echo 16 > /sys/class/net/ethX/device/sriov_numvfs - ``` - -2. Obtain the PCI BDF information of PFs and VFs. - 1. Run the following command to obtain the NIC resource list on the current board: - - ``` - # lspci | grep Eth - 03:00.0 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family (4*25GE) (rev 45) - 04:00.0 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family (4*25GE) (rev 45) - 05:00.0 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family (4*25GE) (rev 45) - 06:00.0 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family (4*25GE) (rev 45) - 7d:00.0 Ethernet controller: Huawei Technologies Co., Ltd. Device a222 (rev 20) - 7d:00.1 Ethernet controller: Huawei Technologies Co., Ltd. Device a222 (rev 20) - 7d:00.2 Ethernet controller: Huawei Technologies Co., Ltd. 
Device a221 (rev 20) - 7d:00.3 Ethernet controller: Huawei Technologies Co., Ltd. Device a221 (rev 20) - ``` - - 2. Run the following command to view the PCI BDF information of VFs: - - ``` - # lspci | grep "Virtual Function" - 03:00.1 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.2 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.3 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.4 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.5 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.6 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:00.7 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:01.0 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:01.1 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - 03:01.2 Ethernet controller: Huawei Technologies Co., Ltd. Hi1822 Family Virtual Function (rev 45) - ``` - - 3. Select an available VF and write its configuration to the VM configuration file based on its BDF information. For example, the bus ID of the device **03:00.1** is **03**, its slot ID is **00**, and its function ID is **1**. - -3. Identify and manage the mapping between PFs and VFs. - 1. Identify VFs corresponding to a PF. The following uses PF 03.00.0 as an example: - - ``` - # ls -l /sys/bus/pci/devices/0000\:03\:00.0/ - ``` - - The following symbolic link information is displayed. You can obtain the VF IDs \(virtfnX\) and PCI BDF IDs based on the information. - - 2. Identify the PF corresponding to a VF. 
The following uses VF 03:00.1 as an example: - - ``` - # ls -l /sys/bus/pci/devices/0000\:03\:00.1/ - ``` - - The following symbolic link information is displayed. You can obtain PCI BDF IDs of the PF based on the information. - - ``` - lrwxrwxrwx 1 root root 0 Mar 28 22:44 physfn -> ../0000:03:00.0 - ``` - - 3. Obtain names of NICs corresponding to the PFs or VFs. For example: - - ``` - # ls /sys/bus/pci/devices/0000:03:00.0/net - eth0 - ``` - - 4. Set the MAC address, VLAN, and QoS information of VFs to ensure that the VFs are in the **Up** state before passthrough. The following uses VF 03:00.1 as an example. The PF is eth0 and the VF ID is **0**. - - ``` - # ip link set eth0 vf 0 mac 90:E2:BA:21:XX:XX #Sets the MAC address. - # ifconfig eth0 up - # ip link set eth0 vf 0 rate 100 #Sets the VF outbound rate, in Mbit/s. - # ip link show eth0 #Views the MAC address, VLAN ID, and QoS information to check whether the configuration is successful. - ``` - -4. Mount the SR-IOV NIC to the VM. - - When creating a VM, add the SR-IOV passthrough configuration item to the VM configuration file. - - ``` - - - -
- - - - - - ``` - - **Table 1** SR-IOV configuration options - - - - - - - - - - - - - - - - - - - - - - - - -

Parameter

-

Description

-

Value

-

hostdev.managed

-

Two modes for libvirt to process PCI devices.

-

no: default value. The passthrough device is managed by the user.

-

yes: The passthrough device is managed by libvirt. Set this parameter to yes in the SR-IOV passthrough scenario.

-

hostdev.source.address.bus

-

Bus ID of the PCI device on the host OS.

-

≥ 1

-

hostdev.source.address.slot

-

Device ID of the PCI device on the host OS.

-

≥ 0

-

hostdev.source.address.function

-

Function ID of the PCI device on the host OS.

-

≥ 0

-
- - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >Disabling the SR-IOV function: - >To disable the SR-IOV function after the VM is stopped and no VF is in use, run the following command: - >The following uses the Hi1822 NIC \(corresponding network interface name: eth0\) as an example: - >``` - >echo 0 > /sys/class/net/eth0/device/sriov_numvfs - >``` - - -#### SR-IOV Passthrough for the HPRE Accelerator - -The accelerator engine is a hardware acceleration solution provided by TaiShan 200 server based on the Kunpeng 920 processor. The HPRE accelerator is used to accelerate SSL/TLS applications. It significantly reduces processor consumption and improves processor efficiency. -On the Kunpeng server, the VF of the HPRE accelerator on the host needs to be passed through to the VM for internal services of the VM. - -**Table 1** HPRE accelerator description - -| items | Description | -|-------------|-----------------------------------------------------------------------------------------------------| -| Device name | Hi1620 on-chip RSA/DH security algorithm accelerator (HPRE engine) | -| Function | Modular exponentiation, RSA key pair operation, DH calculation, and auxiliary operations of large numbers (modular exponentiation, modular multiplication, modulo, modular inverse, primality test, and co-prime test) | -| VendorID | 0x19E5 | -| PF DeviceID | 0xA258 | -| VF DeviceID | 0xA259 | -| Maximum number of VF | A maximum of 63 VFs can be created for an HPRE PF | - - ->![](./public_sys-resources/icon-note.gif) **Note** ->When a VM is using a VF device, the driver on the host cannot be uninstalled, and the accelerator does not support hot swap. ->VF operation (If VFNUMS is 0, the VF is disabled. 
hpre_num is used to identify a specific accelerator device): ->``` ->echo $VFNUMS > /sys/class/uacce/hisi_hpre-$hpre_num/device/sriov_numvfs ->``` - - - - -## Managing VM USB - -To facilitate the use of USB devices such as USB key devices and USB mass storage devices on VMs, openEuler provides the USB device passthrough function. Through USB passthrough and hot-swappable interfaces, you can configure USB passthrough devices for VMs, or hot swap USB devices when VMs are running. - -### Configuring USB Controllers - -#### Overview - -A USB controller is a virtual controller that provides specific USB functions for USB devices on VMs. To use USB devices on a VM, you must configure USB controllers for the VM. Currently, openEuler supports the following types of USB controllers: - -- Universal host controller interface \(UHCI\): also called the USB 1.1 host controller specification. -- Enhanced host controller interface \(EHCI\): also called the USB 2.0 host controller specification. -- Extensible host controller interface \(xHCI\): also called the USB 3.0 host controller specification. - -#### Precautions - -- The host server must have USB controller hardware and modules that support USB 1.1, USB 2.0, and USB 3.0 specifications. -- You need to configure USB controllers for the VM by following the order of USB 1.1, USB 2.0, and USB 3.0. -- An xHCI controller has eight ports and can be mounted with a maximum of four USB 3.0 devices and four USB 2.0 devices. An EHCI controller has six ports and can be mounted with a maximum of six USB 2.0 devices. A UHCI controller has two ports and can be mounted with a maximum of two USB 1.1 devices. -- On each VM, only one USB controller of the same type can be configured. -- USB controllers cannot be hot swapped. -- If the USB 3.0 driver is not installed on a VM, the xHCI controller may not be identified. 
For details about how to download and install the USB 3.0 driver, refer to the official description provided by the corresponding OS distributor. -- To ensure the compatibility of the OS, set the bus ID of the USB controller to **0** when configuring a USB tablet for the VM. The tablet is mounted to the USB 1.1 controller by default. - -#### Configuration Methods - -The following describes the configuration items of USB controllers for a VM. You are advised to configure USB 1.1, USB 2.0, and USB 3.0 to ensure the VM is compatible with three types of devices. - -The configuration item of the USB 1.1 controller \(UHCI\) in the XML configuration file is as follows: - -``` - - -``` - -The configuration item of the USB 2.0 controller \(EHCI\) in the XML configuration file is as follows: - -``` - - -``` - -The configuration item of the USB 3.0 controller \(xHCI\) in the XML configuration file is as follows: - -``` - - -``` - -### Configuring a USB Passthrough Device - -#### Overview - -After USB controllers are configured for a VM, a physical USB device on the host can be mounted to the VM through device passthrough for the VM to use. In the virtualization scenario, in addition to static configuration, hot swapping the USB device is supported. That is, the USB device can be mounted or unmounted when the VM is running. - -#### Precautions - -- A USB device can be assigned to only one VM. -- A VM with a USB passthrough device does not support live migration. -- VM creation fails if no USB passthrough devices exist in the VM configuration file. -- Forcibly hot removing a USB storage device that is performing read or write operation may damage files in the USB storage device. - -#### Configuration Description - -The following describes the configuration items of a USB device for a VM. - -Description of the USB device in the XML configuration file: - -``` - - -
- -
- -``` - -- **
**: _m_ indicates the USB bus address on the host, and _n_ indicates the device ID. -- **
**: indicates that the USB device is to be mounted to the USB controller specified on the VM. _x_ indicates the controller ID, which corresponds to the index number of the USB controller configured on the VM. _y_ indicates the port address. When configuring a USB passthrough device, you need to set this parameter to ensure that the controller to which the device is mounted is as expected. - -#### Configuration Methods - -To configure USB passthrough, perform the following steps: - -1. Configure USB controllers for the VM. For details, see [Configuring USB Controllers](#configuring-usb-controllers). -2. Query information about the USB device on the host. - - Run the **lsusb** command \(the **usbutils** software package needs to be installed\) to query the USB device information on the host, including the bus address, device address, device vendor ID, device ID, and product description. For example: - - ``` - # lsusb - ``` - - ``` - Bus 008 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub - Bus 007 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub - Bus 002 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub - Bus 004 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub - Bus 006 Device 002: ID 0bda:0411 Realtek Semiconductor Corp. - Bus 006 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub - Bus 005 Device 003: ID 136b:0003 STEC - Bus 005 Device 002: ID 0bda:5411 Realtek Semiconductor Corp. - Bus 005 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub - Bus 001 Device 003: ID 12d1:0003 Huawei Technologies Co., Ltd. - Bus 001 Device 002: ID 0bda:5411 Realtek Semiconductor Corp. - Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub - Bus 003 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub - ``` - -3. Prepare the XML description file of the USB device. Before hot removing the device, ensure that the USB device is not in use. Otherwise, data may be lost. -4. Run the hot swapping commands. 
- - Take a VM whose name is **openEulerVM** as an example. The corresponding configuration file is **usb.xml**. - - - Hot adding of the USB device takes effect only for the current running VM. After the VM is restarted, hot add the USB device again. - - ``` - # virsh attach-device openEulerVM usb.xml --live - ``` - - - Complete persistency configurations for hot adding of the USB device. After the VM is restarted, the USB device is automatically assigned to the VM. - - ``` - # virsh attach-device openEulerVM usb.xml --config - ``` - - - Hot removing of the USB device takes effect only for the current running VM. After the VM is restarted, the USB device with persistency configurations is automatically assigned to the VM. - - ``` - # virsh detach-device openEulerVM usb.xml --live - ``` - - - Complete persistency configurations for hot removing of the USB device. - - ``` - # virsh detach-device openEulerVM usb.xml --config - ``` - - - -## Storing Snapshots - -### Overview - -The VM system may be damaged due to virus damage, system file deletion by mistake, or incorrect formatting. As a result, the system cannot be started. To quickly restore a damaged system, openEuler provides the storage snapshot function. openEuler can create a snapshot that records the VM status at specific time points without informing users \(usually within a few seconds\). The snapshot can be used to restore the VM to the status when the snapshots were taken. For example, a damaged system can be quickly restored with the help of snapshots, which improves system reliability. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Currently, storage snapshots can be QCOW2 and RAW images only. Block devices are not supported. - -### Procedure - -To create VM storage snapshots, perform the following steps: - -1. Log in to the host and run the **virsh domblklist** command to query the disk used by the VM. 
- - ``` - # virsh domblklist openEulerVM - Target Source - --------------------------------------------- - vda /mnt/openEuler-image.qcow2 - ``` - - -1. Run the following command to create the VM disk snapshot **openEuler-snapshot1.qcow2**: - - ``` - # virsh snapshot-create-as --domain openEulerVM --disk-only --diskspec vda,snapshot=external,file=/mnt/openEuler-snapshot1.qcow2 --atomic - Domain snapshot 1582605802 created - ``` - - -1. Run the following command to query disk snapshots: - - ``` - # virsh snapshot-list openEulerVM - Name Creation Time State - --------------------------------------------------------- - 1582605802 2020-02-25 12:43:22 +0800 disk-snapshot - ``` - diff --git a/docs/en/docs/Virtualization/managing-vms.md b/docs/en/docs/Virtualization/managing-vms.md deleted file mode 100644 index 480172aa2e1abf1164c4b62872ef0a49b5dd2e1d..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/managing-vms.md +++ /dev/null @@ -1,802 +0,0 @@ -## Managing VMs - -- [Managing VMs](#managing-vms) - - [VM Life Cycle](#vm-life-cycle) - - [Introduction](#introduction-0) - - [Management Commands](#management-commands) - - [Example](#example) - - [Modify VM Configurations Online](#modify-vm-configurations-online) - - [Querying VM Information](#querying-vm-information) - - [Logging In to a VM](#logging-in-to-a-vm) - - [Logging In Using VNC Passwords](#logging-in-using-vnc-passwords) - - [Configuring VNC TLS Login](#configuring-vnc-tls-login) - - [VM Secure Boot](#VM-Secure-Boot) - - [General Introduction](#General-Introduction) - - [Secure Boot Practice](#Secure-Boot-Practice) - -### VM Life Cycle - -#### Introduction - -##### Overview - -To leverage hardware resources and reduce costs, users need to properly manage VMs. This section describes basic operations during the VM lifecycle, such as creating, using, and deleting VMs. - -##### VM Status - -A VM can be in one of the following status: - -- **undefined**: The VM is not defined or created. 
That is, libvirt considers that the VM does not exist. -- **shut off**: The VM has been defined but is not running, or the VM is terminated. -- **running**: The VM is running. -- **paused**: The VM is suspended and its running status is temporarily stored in the memory. The VM can be restored to the running status. -- **saved**: Similar to the **paused** status, the running state is stored in a persistent storage medium and can be restored to the running status. -- **crashed**: The VM crashes due to an internal error and cannot be restored to the running status. - -##### Status Transition - -VMs in different status can be converted, but certain rules must be met. [Figure 1](#fig671014583483) describes the common rules for transiting the VM status. - -**Figure 1** Status transition diagram -![](./figures/status-transition-diagram.png "status-transition-diagram") - -##### VM ID - -In libvirt, a created VM instance is called a **domain**, which describes the configuration information of resources such as the CPU, memory, network device, and storage device of the VM. On a host, each domain has a unique ID, which is represented by the VM **Name**, **UUID**, and **Id**. For details, see [Table 1](#table84397266483). During the VM lifecycle, an operation can be performed on a specific VM by using a VM ID. - -**Table 1** Domain ID description - - - - - - - - - - - - - - - - -

-| ID | Description |
-| :----- | :----- |
-| Name | VM name |
-| UUID | Universally unique identifier |
-| Id | VM running ID. **NOTE:** The ID is not displayed for a powered off VM. |
- ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->Run the **virsh** command to query the VM ID and UUID. For details, see [Querying VM Information](#querying-vm-information). - -#### Management Commands - -##### Overview - -You can use the **virsh** command tool to manage the VM lifecycle. This section describes the commands related to the lifecycle. - -##### Prerequisites - -- Before performing operations on a VM, you need to query the VM status to ensure that the operations can be performed. For details about the conversion between status, see [Status Transition](#introduction-0). -- You have administrator rights. -- The VM XML configuration files are prepared. - -##### Command Usage - -You can run the **virsh** command to manage the VM lifecycle. The command format is as follows: - -``` -virsh -``` - -The parameters are described as follows: - -- _operate_: manages VM lifecycle operations, such as creating, deleting, and starting VMs. -- _obj_: specifies the operation object, for example, the VM to be operated. -- _options_: command option. This parameter is optional. - -[Table 1](#table389518422611) describes the commands used for VM lifecycle management. _VMInstanse_ indicates the VM name, VM ID, or VM UUID, _XMLFile_ indicates the XML configuration file of the VM, and _DumpFile_ indicates the dump file. Change them based on the site requirements. - -**Table 1** VM Lifecycle Management Commands - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

-| Command | Description |
-| :----- | :----- |
-| `virsh define <XMLFile>` | Define a persistent VM. After the definition is complete, the VM is shut down and is considered as a domain instance. |
-| `virsh create <XMLFile>` | Create a temporary VM. After the VM is created, it is in the running status. |
-| `virsh start <VMInstanse>` | Start the VM. |
-| `virsh shutdown <VMInstanse>` | Shut down the VM. Start the VM shutdown process. If the VM fails to be shut down, forcibly stop it. |
-| `virsh destroy <VMInstanse>` | Forcibly stop the VM. |
-| `virsh reboot <VMInstanse>` | Reboot the VM. |
-| `virsh save <VMInstanse> <DumpFile>` | Dump the VM running status to a file. |
-| `virsh restore <DumpFile>` | Restore the VM from the VM status dump file. |
-| `virsh suspend <VMInstanse>` | Suspend the VM to make the VM in the paused status. |
-| `virsh resume <VMInstanse>` | Resume the VM and restore the VM in the paused status to the running status. |
-| `virsh undefine <VMInstanse>` | After a persistent VM is destroyed, the VM lifecycle ends and no more operations can be performed on the VM. |
- -#### Example - -This section provides examples of commands related to VM life cycle management. - -- Create a VM. - - The VM XML configuration file is **openEulerVM.xml**. The command and output are as follows: - - ``` - # virsh define openEulerVM.xml - Domain openEulerVM defined from openEulerVM.xml - ``` - - -- Start a VM. - - Run the following command to start the _openEulerVM_: - - ``` - # virsh start openEulerVM - Domain openEulerVM started - ``` - -- Reboot a VM. - - Run the following command to reboot the _openEulerVM_: - - ``` - # virsh reboot openEulerVM - Domain openEulerVM is being rebooted - ``` - -- Shut down a VM. - - Run the following command to shut down the _openEulerVM_: - - ``` - # virsh shutdown openEulerVM - Domain openEulerVM is being shutdown - ``` - -- Destroy a VM. - - If the **nvram** file is not used during the VM startup, run the following command to destroy the VM: - - ``` - # virsh undefine - ``` - - - If the **nvram** file is used during the VM startup, run the following command to specify the **nvram** processing policy when destroying the VM: - - ``` - # virsh undefine - ``` - - _strategy_ indicates the policy for destroying a VM. The values can be: - - --**nvram**: delete the corresponding **nvram** file when destroying a VM. - - --**keep-nvram**: destroy a VM but retain the corresponding **nvram** file. - - For example, to delete the _openEulerVM_ and its **nvram** file, run the following command: - - ``` - # virsh undefine openEulerVM --nvram - Domain openEulerVM has been undefined - ``` - - - -### Modify VM Configurations Online - -#### Overview - -After a VM is created, users can modify VM configurations. This process is called online modification of VM configuration. After the configuration is modified online, the new VM configuration file is persistent and takes effect after the VM is shut down and restarted. 
- -The format of the command for modifying VM configuration is as follows: - -``` -virsh edit -``` - -The **virsh edit** command is used to edit the XML configuration file corresponding to **domain** to update VM configuration. **virsh edit** uses the **vi** program as the default editor. You can specify the editor type by modifying the environment variable _EDITOR_ or _VISUAL_. By default, **virsh edit** preferentially uses the text editor specified by the environment variable _VISUAL_. - -#### Procedure - -1. \(Optional\) Set the editor of the **virsh edit** command to **vim**. - - ``` - # export VISUAL=vim - ``` - -2. Run the **virsh edit** command to open the XML configuration file of the _openEulerVM_. - - ``` - # virsh edit openEulerVM - ``` - -3. Modify the VM configuration file. -4. Save the VM configuration file and exit. -5. Reboot the VM for the configuration to take effect. - - ``` - # virsh reboot openEulerVM - ``` - - -### Querying VM Information - -#### Overview - -The libvirt provides a set of command line tools to query VM information. This section describes how to use commands to obtain VM information. - -#### Prerequisites - -To query VM information, the following requirements must be met: - -- The libvirtd service is running. - -- Only the administrator has the permission to execute command line. - -#### Querying VM Information on a Host. - -- Query the list of running and paused VMs on a host. - - ``` - # virsh list - ``` - - For example, the following command output indicates that three VMs exist on the host. **openEulerVM01** and **openEulerVM02** are running, and **openEulerVM03** is paused. - - ``` - Id Name State - ---------------------------------------------------- - 39 openEulerVM01 running - 40 openEulerVM02 running - 69 openEulerVM03 paused - ``` - - -- Query the list of VM information defined on a host. 
- - ``` - # virsh list --all - ``` - - For example, the following command output indicates that four VMs are defined on the current host. **openEulerVM01** is running, **openEulerVM02** is paused, and **openEulerVM03** and **openEulerVM04** are shut down. - - ``` - Id Name State - ---------------------------------------------------- - 39 openEulerVM01 running - 69 openEulerVM02 paused - - openEulerVM03 shut off - - openEulerVM04 shut off - ``` - - -#### Querying Basic VM Information - -Libvirt component provides a group of commands for querying the VM status, including the VM running status, device information, and scheduling attributes. For details, see [Table 1](#table10582103963816). - -**Table 1** Querying basic VM information - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

-| Information to be queried | Command line | Description |
-| :----- | :----- | :----- |
-| Basic information | `virsh dominfo <VMInstance>` | The information includes the VM ID, UUID, and VM specifications. |
-| Current status | `virsh domstate <VMInstance>` | You can use the `--reason` option to query the reason why the VM changes to the current status. |
-| Scheduling information | `virsh schedinfo <VMInstance>` | The information includes the vCPU share. |
-| Number of vCPUs | `virsh vcpucount <VMInstance>` | Number of vCPUs of the VM. |
-| Virtual block device status | `virsh domblkstat <VMInstance>` | To query the name of a block device, run the `virsh domblklist` command. |
-| vNIC status | `virsh domifstat <VMInstance>` | To query the NIC name, run the `virsh domiflist` command. |
-| I/O thread | `virsh iothreadinfo <VMInstance>` | VM I/O thread and CPU affinity. |
- -#### Example - -- Run the **virsh dominfo** command to query the basic information about a created VM. The query result shows that the VM ID is **5**, UUID is **ab472210-db8c-4018-9b3e-fc5319a769f7**, memory size is 8 GiB, and the number of vCPUs is 4. - - ``` - # virsh dominfo openEulerVM - Id: 5 - Name: openEulerVM - UUID: ab472210-db8c-4018-9b3e-fc5319a769f7 - OS Type: hvm - State: running - CPU(s): 4 - CPU time: 6.8s - Max memory: 8388608 KiB - Used memory: 8388608 KiB - Persistent: no - Autostart: disable - Managed save: no - Security model: none - Security DOI: 0 - ``` - - -- Run the **virsh domstate** command to query the VM status. The query result shows that VM **openEulerVM** is running. - - ``` - # virsh domstate openEulerVM - running - ``` - -- Run **virsh schedinfo** to query the VM scheduling information. The query result shows that the CPU reservation share of the VM is 1024. - - ``` - # virsh schedinfo openEulerVM - Scheduler : posix - cpu_shares : 1024 - vcpu_period : 100000 - vcpu_quota : -1 - emulator_period: 100000 - emulator_quota : -1 - global_period : 100000 - global_quota : -1 - iothread_period: 100000 - iothread_quota : -1 - ``` - -- Run the **virsh vcpucount** command to query the number of vCPUs. The query result shows that the VM has four CPUs. - - ``` - # virsh vcpucount openEulerVM - maximum live 4 - current live 4 - ``` - -- Run the **virsh domblklist** command to query the VM disk information. The query result shows that the VM has two disks. sda is a virtual disk in qcow2 format, and sdb is a cdrom device. - - ``` - # virsh domblklist openEulerVM - Target Source - --------------------------------------------------------------------- - sda /home/openeuler/vm/openEuler_aarch64.qcow2 - sdb /home/openeuler/vm/openEuler-21.03-aarch64-dvd.iso - ``` - -- Run the **virsh domiflist** command to query the VM NIC information. The query result shows that the VM has one NIC, the backend is vnet0, which is on the br0 bridge of the host. 
The MAC address is 00:05:fe:d4:f1:cc. - - ``` - # virsh domiflist openEulerVM - Interface Type Source Model MAC - ------------------------------------------------------- - vnet0 bridge br0 virtio 00:05:fe:d4:f1:cc - ``` - -- Run the **virsh iothreadinfo** command to query the VM I/O thread information. The query result shows that the VM has five I/O threads, which are scheduled on physical CPUs 7-10. - - ``` - # virsh iothreadinfo openEulerVM - IOThread ID CPU Affinity - --------------------------------------------------- - 3 7-10 - 4 7-10 - 5 7-10 - 1 7-10 - 2 7-10 - ``` - - -### Logging In to a VM - -This section describes how to log in to a VM using VNC. - -#### Logging In Using VNC Passwords - -##### Overview - -After the OS is installed on a VM, you can remotely log in to the VM using VNC to manage the VM. - -##### Prerequisites - -Before logging in to a VM using a client, such as RealVNC or TightVNC, ensure that: - -- You have obtained the IP address of the host where the VM resides. -- The environment where the client resides can access the network of the host. -- You have obtained the VNC listening port of the VM. This port is automatically allocated when the client is started. Generally, the port number is **5900 + x** \(_x_ is a positive integer and increases in ascending order based on the VM startup sequence. **5900** is invisible to users.\) -- If a password has been set for the VNC, you also need to obtain the VNC password of the VM. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >To set a password for the VM VNC, edit the XML configuration file of the VM. That is, add the **passwd** attribute to the **graphics** element and set the attribute value to the password to be configured. For example, to set the VNC password of the VM to **n8VfjbFK**, configure the XML file as follows: - >``` - > - > - > - >``` - - -##### Procedure -##### Procedure - -1. Query the VNC port number used by the VM. 
For example, if the VM name is _openEulerVM_, run the following command: - - ``` - # virsh vncdisplay openEulerVM - :3 - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >To log in to the VNC, you need to configure firewall rules to allow the connection of the VNC port. The reference command is as follows, where _X_ is **5900 + Port number**, for example, **5903**. - >``` - >firewall-cmd --zone=public --add-port=X/tcp - >``` - -2. Start the VncViewer software and enter the IP address and port number of the host. The format is **host IP address:port number**, for example, **10.133.205.53:3**. -3. Click **OK** and enter the VNC password \(optional\) to log in to the VM VNC. - -#### Configuring VNC TLS Login - -##### Overview - -By default, the VNC server and client transmit data in plaintext. Therefore, the communication content may be intercepted by a third party. To improve security, openEuler allows the VNC server to configure the Transport Layer Security \(TLS\) mode for encryption and authentication. TLS implements encrypted communication between the VNC server and client to prevent communication content from being intercepted by third parties. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- To use the TLS encryption authentication mode, the VNC client must support the TLS mode \(for example, TigerVNC\). Otherwise, the VNC client cannot be connected. ->- The TLS encryption authentication mode is configured at the host level. After this feature is enabled, the TLS encryption authentication mode is enabled for the VNC clients of all VMs running on the host. - -##### Procedure - -To enable the TLS encryption authentication mode for the VNC, perform the following steps: - -1. Log in to the host where the VNC server resides, and edit the corresponding configuration items in the **/etc/libvirt/qemu.conf** configuration file of the server. 
The configuration is as follows: - - ``` - vnc_listen = "x.x.x.x" # "x.x.x.x" indicates the listening IP address of the VNC. Set this parameter based on the site requirements. The VNC server allows only the connection requests from clients whose IP addresses are in this range. - vnc_tls = 1 # If this parameter is set to 1, VNC TLS is enabled. - vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc" # Specify /etc/pki/libvirt-vnc as the path for storing the certificate. - vnc_tls_x509_verify = 1 #If this parameter is set to 1, the X509 certificate is used for TLS authentication. - ``` - -2. Create a certificate and a private key file for the VNC. The following uses GNU TLS as an example. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >To use GNU TLS, install the gnu-utils software package in advance. - - 1. Create a certificate file issued by the Certificate Authority \(CA\). - - ``` - # certtool --generate-privkey > ca-key.pem - ``` - - 1. Create a self-signed public and private key for the CA certificate. _Your organization name_ indicates the organization name, which is specified by the user. - - ``` - # cat > ca.info< server.info< server-key.pem - # certtool --generate-certificate \ - --load-ca-certificate ca-cert.pem \ - --load-ca-privkey ca-key.pem \ - --load-privkey server-key.pem \ - --template server.info \ - --outfile server-cert.pem - ``` - - In the preceding generated file, **server-key.pem** is the private key of the VNC server, and **server-cert.pem** is the public key of the VNC server. - - 3. Issue a certificate to the VNC client. - - ``` - # cat > client.info< client-key.pem - # certtool --generate-certificate \ - --load-ca-certificate ca-cert.pem \ - --load-ca-privkey ca-key.pem \ - --load-privkey client-key.pem \ - --template client.info \ - --outfile client-cert.pem - ``` - - In the preceding generated file, **client-key.pem** is the private key of the VNC client, and **client-cert.pem** is the public key of the VNC client. 
The generated public and private key pairs need to be copied to the VNC client. - -3. Shut down the VM to be logged in to and restart the libvirtd service on the host where the VNC server resides. - - ``` - # systemctl restart libvirtd - ``` - -4. Save the generated server certificate to the specified directory on the VNC server and grant the read and write permissions on the certificate only to the current user. - - ``` - # sudo mkdir -m 750 /etc/pki/libvirt-vnc - # cp ca-cert.pem /etc/pki/libvirt-vnc/ca-cert.pem - # cp server-cert.pem /etc/pki/libvirt-vnc/server-cert.pem - # cp server-key.pem /etc/pki/libvirt-vnc/server-key.pem - # chmod 0600 /etc/pki/libvirt-vnc/* - ``` - -5. Copy the generated client certificates **ca-cert.pem**, **client-cert.pem**, and **client-key.pem** to the VNC client. After the TLS certificate of the VNC client is configured, you can use VNC TLS to log in to the VM. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- For details about how to configure the VNC client certificate, see the usage description of each client. - >- For details about how to log in to the VM, see Logging In Using VNC Passwords. - -### VM Secure Boot - -#### General Introduction - -##### Overview - -Secure boot uses public and private key pairs to sign and validate boot components. During the startup, the previous component validates the digital signature of the next component. If the validation is successful, the next component starts. If the validation fails, the startup fails. Secure boot is used to detect whether the firmware and software during startup of the device are tampered with to prevent malware from intrusion and modification. Secure boot ensures the integrity of each component during system startup and prevents unauthorized components from being loaded and running, thereby preventing security threats to the system and user data. Secure boot is implemented based on the UEFI boot mode. It is not supported by the legacy boot mode. 
According to UEFI specifications, some reliable public keys can be built in the mainboard before delivery. Any operating system or hardware drivers that you want to load on this mainboard must be authenticated by these public keys. The secure boot of a physical machine is implemented by the physical BIOS, while the secure boot of a VM is simulated by software. The process of the VM secure boot is the same as that of the host secure boot, both complying with the open-source UEFI specifications. The UEFI on the virtualization platform is provided by the edk component. When a VM starts, QEMU maps the UEFI image to the memory to simulate the firmware startup process for the VM. Secure boot is a security protection capability provided by edk during the VM startup to protect the OS kernel of the VM from being tampered with. The sequence of signature validation for the secure boot is as follows: UEFI BIOS->shim->GRUB->vmlinuz (signature validation is passed and loaded in sequence). - -| English | Acronyms and Abbreviations | Description | -| :----- | :----- | :----- | -| Secure boot | - | Secure boot indicates that a component validates the digital signature of the next component during startup. If the validation is successful, the component runs. If the validation fails, the component stops running. It ensures the integrity of each component during system startup. | -| Platform key | PK | Platform key is owned by the OEM vendor and must be RSA2048 or stronger. The PK establishes a trusted relationship between the platform owner and the platform firmware. The platform owner registers the PKpub, public key of the PK, with the platform firmware. The platform owner can use the PKpriv, private part of the PK, to change the ownership of the platform or register the KEK key. | -| Key exchange key | KEK | Key exchange key creates a trusted relationship between the platform firmware and the OS. 
Each OS and third-party application that communicates with the platform firmware register the KEKpub, public part of the KEK key, in the platform firmware. | -| Database trustlist | DB | Database trustlist stores and validates the keys of components such as shim, GRUB, and vmlinuz. | -| Database blocklist | DBx | Database blocklist stores revoked keys. | - -##### Function Description - -The VM secure boot feature is implemented based on the edk open-source project. In non-secure boot mode, the basic Linux process is as follows: - -**Figure 1** System startup process - -![](./figures/OSBootFlow.png) - -In secure boot mode, the first component loaded after UEFI BIOS starts is shim in the system image. By interacting with UEFI BIOS, shim obtains the key stored in the variable DB of UEFI BIOS to validate GRUB. After GRUB is loaded, the key and the authentication API are also called to validate the kernel. The Linux boot process is as follows: - -**Figure 2** Secure boot process - -![](./figures/SecureBootFlow.png) - -The secure boot feature involves multiple key scenarios. Based on the scenario analysis and system breakdown, the secure boot feature involves the following subsystems: UEFI BIOS validating shim, shim validating GRUB, and GRUB validating kernel. When UEFI BIOS validates shim, if the validation is successful, shim is started. If the validation fails, an error message is displayed and shim fails to start. Shim needs to use the private key for signature during image compilation and creation, and the public key certificate needs to be imported to the variable area DB of UEFI BIOS. After shim is started, validate the startup of GRUB. If the validation is successful, GRUB is started. If the validation fails, an error message is displayed and GRUB fails to start. GRUB needs to be signed during image compilation and creation. The public and private key pairs are the same as those of shim. 
After GRUB is started, it calls the key and the authentication API key registered in UEFI BIOS to validate the kernel. If the validation is successful, GRUB starts the kernel. If the validation fails, an error message is displayed. GRUB needs to sign the image during compilation and creation and uses the public and private key pair that is the same as that of shim. - -##### Constraints - -* Running on the UEFI BIOS that does not support secure boot does not affect existing functions and services. -* The secure boot feature depends on the UEFI BIOS and takes effect only when the UEFI supports this feature. -* When secure boot is enabled in the UEFI BIOS, the system cannot be started if the related components have no signature or the signature is incorrect. -* If secure boot is disabled in the UEFI BIOS, the validation function during the boot process is disabled. -* The second half of the secure boot validation chain, that is, shim->GRUB->kernel, guides the kernel to start. This part of the validation chain is implemented by the OS image. If the OS does not support guiding the kernel for secure boot, the VM secure boot fails. -* Currently, the x86 architecture do not provide nvram file configuration to configure VM secure boot. - -#### Secure Boot Practice - -VM secure boot depends on UEFI BIOS. The UEFI BIOS image is installed using the edk rpm package. This section uses AArch64 as an example to describe how to configure VM secure boot. - -##### Configuring VM - -The components in the edk rpm package are installed in the /usr/share/edk2/aarch64 directory, including `QEMU_EFI-pflash.raw` and `vars-template-pflash.raw`. The following describes the XML configuration of the UEFI BIOS during VM startup. - -``` - - hvm - /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw - /path/to/QEMU-VARS.fd - -``` - -In the preceding configuration, /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw indicates the path of the UEFI BIOS image. 
/usr/share/edk2/aarch64/vars-template-pflash.raw is the path of the NVRAM image template, and /path/to/QEMU-VARS.fd is the path of the NVRAM image file of the current VM, which is used to store environment variables in the UEFI BIOS. - -##### Importing Certificate - -The certificate for VM secure boot is imported from the BIOS page. Before importing the certificate, you need to import the certificate file to the VM. You can mount the directory where the certificate file is located to the VM by mounting a disk. For example, you can create an image that contains the certificate and mount the image in the XML configuration file of the VM. - -Create a certificate file image. - -``` -dd of='/path/to/data.img' if='/dev/zero' bs=1M count=64 -mkfs.vfat -I /path/to/data.img -mkdir /path/to/mnt -mount path/to/data.img /path/to/mnt/ -cp -a /path/to/certificates/* /path/to/mnt/ -umount /path/to/mnt/ -``` -In the preceding command, /path/to/certificates/ indicates the path where the certificate file is located, /path/to/data.img indicates the path where the certificate file image is located, and /path/to/mnt/ indicates the image mounting path. - -Mount the image in the XML file of the VM. - -``` - - - - - - - - - -``` - -Start the VM and import the PK certificate. The procedure is as follows (the procedure for importing the KEK certificate is the same as that for importing the DB certificate): - -After the VM is started, press F2 to go to the BIOS screen. - -**Figure 1** BIOS screen - -![](./figures/CertEnrollP1.png) - -**Figure 2** Device Manager - -![](./figures/CertEnrollP2.png) - -**Figure 3** Custom Secure Boot Options - -![](./figures/CertEnrollP3.png) - -**Figure 4** PK Options - -![](./figures/CertEnrollP4.png) - -**Figure 5** Enrolling PK - -![](./figures/CertEnrollP5.png) - -In the File Explorer window, many disk directories are displayed, including the certificate file directory mounted through the disk. 
- -**Figure 6** File Explorer - -![](./figures/CertEnrollP6.png) - -Select the PK certificate to be imported in the disk directory. - -**Figure 7** Disk where the certificate is stored - -![](./figures/CertEnrollP7.png) - -**Figure 8** Selecting Commit Changes and Exit to save the imported certificate - -![](./figures/CertEnrollP8.png) - -After the certificate is imported, the UEFI BIOS writes the certificate information and secure boot attributes to the NVRAM configuration file /path/to/QEMU-VARS.fd. Upon the next startup, the VM reads related configurations from the /path/to/QEMU-VARS.fd file, initializes certificate information and secure boot attributes, automatically imports the certificate, and enables secure boot. Similarly, you can use /path/to/QEMU-VARS.fd as the UEFI BIOS boot configuration template file of other VMs with the same configuration. Modify the nvram template field so that the certificate is automatically imported and the secure boot option is enabled when other VMs are started. The VM XML configuration is modified as follows: - -``` - - hvm - /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw - - -``` - -##### Secure Boot Observation - -After the VM is correctly configured and the PK, KEK, and DB certificates are imported, the VM runs in secure boot mode. You can configure the serial port log file in the VM configuration file in XML format to check whether the VM is in the secure boot mode. The following figure shows how to configure the serial port log file. - -``` - - - -``` - -After the OS image is successfully loaded to the VM, if "UEFI Secure Boot is enabled" is displayed in the serial port log file, the VM is in the secure boot state. 
\ No newline at end of file diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-caution.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-caution.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-caution.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-danger.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-danger.gif deleted file mode 100644 index 6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-danger.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-note.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-note.gif deleted file mode 100644 index 6314297e45c1de184204098efd4814d6dc8b1cda..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-note.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-notice.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-notice.gif deleted file mode 100644 index 86024f61b691400bea99e5b1f506d9d9aef36e27..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-notice.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-tip.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-tip.gif deleted file mode 100644 index 93aa72053b510e456b149f36a0972703ea9999b7..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-tip.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/public_sys-resources/icon-warning.gif b/docs/en/docs/Virtualization/public_sys-resources/icon-warning.gif deleted file mode 100644 index 
6e90d7cfc2193e39e10bb58c38d01a23f045d571..0000000000000000000000000000000000000000 Binary files a/docs/en/docs/Virtualization/public_sys-resources/icon-warning.gif and /dev/null differ diff --git a/docs/en/docs/Virtualization/system-resource-management.md b/docs/en/docs/Virtualization/system-resource-management.md deleted file mode 100644 index 33553d2ad0d49d370aeadfd1c8cdb84e9abe5984..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/system-resource-management.md +++ /dev/null @@ -1,463 +0,0 @@ -# system Resource Management - -[[toc]] - - -The **libvirt** command manages VM system resources, such as vCPU and virtual memory resources. - -Before you start: - -- Ensure that the libvirtd daemon is running on the host. -- Run the **virsh list --all** command to check that the VM has been defined. - - -## Managing vCPU - -### CPU Shares - -#### Overview - -In a virtualization environment, multiple VMs on the same host compete for physical CPUs. To prevent some VMs from occupying too many physical CPU resources and affecting the performance of other VMs on the same host, you need to balance the vCPU scheduling of VMs to prevent excessive competition for physical CPUs. - -The CPU share indicates the total capability of a VM to compete for physical CPU computing resources. You can set **cpu\_shares** to specify the VM capacity to preempt physical CPU resources. The value of **cpu\_shares** is a relative value without a unit. The CPU computing resources obtained by a VM are the available computing resources of physical CPUs \(excluding reserved CPUs\) allocated to VMs based on the CPU shares. Adjust the CPU shares to ensure the service quality of VM CPU computing resources. - -#### Procedure - -Change the value of **cpu\_shares** allocated to the VM to balance the scheduling between vCPUs. - -- Check the current CPU share of the VM. 
- - ``` - # virsh schedinfo - Scheduler : posix - cpu_shares : 1024 - vcpu_period : 100000 - vcpu_quota : -1 - emulator_period: 100000 - emulator_quota : -1 - global_period : 100000 - global_quota : -1 - iothread_period: 100000 - iothread_quota : -1 - ``` - - -- Online modification: Run the **virsh schedinfo** command with the **--live** parameter to modify the CPU share of a running VM. - - ``` - # virsh schedinfo --live cpu_shares= - ``` - - For example, to change the CPU share of the running _openEulerVM_ from **1024** to **2048**, run the following commands: - - ``` - # virsh schedinfo openEulerVM --live cpu_shares=2048 - Scheduler : posix - cpu_shares : 2048 - vcpu_period : 100000 - vcpu_quota : -1 - emulator_period: 100000 - emulator_quota : -1 - global_period : 100000 - global_quota : -1 - iothread_period: 100000 - iothread_quota : -1 - ``` - - The modification of the **cpu\_shares** value takes effect immediately. The running time of the _openEulerVM_ is twice the original running time. However, the modification will become invalid after the VM is shut down and restarted. - -- Permanent modification: Run the **virsh schedinfo** command with the **--config** parameter to change the CPU share of the VM in the libvirt internal configuration. - - ``` - # virsh schedinfo --config cpu_shares= - ``` - - For example, run the following command to change the CPU share of _openEulerVM_ from **1024** to **2048**: - - ``` - # virsh schedinfo openEulerVM --config cpu_shares=2048 - Scheduler : posix - cpu_shares : 2048 - vcpu_period : 0 - vcpu_quota : 0 - emulator_period: 0 - emulator_quota : 0 - global_period : 0 - global_quota : 0 - iothread_period: 0 - iothread_quota : 0 - ``` - - The modification on **cpu\_shares** does not take effect immediately. Instead, the modification takes effect after the _openEulerVM_ is started next time and takes effect permanently. The running time of the _openEulerVM_ is twice that of the original VM. 
- - -### Binding the QEMU Process to a Physical CPU - -#### Overview - -You can bind the QEMU main process to a specific physical CPU range, ensuring that VMs running different services do not interfere with adjacent VMs. For example, in a typical cloud computing scenario, multiple VMs run on one physical machine, and they carry diversified services, causing different degrees of resource occupation. To avoid interference of a VM with dense-storage I/O to an adjacent VM, storage processes that process I/O of different VMs need to be completely isolated. The QEMU main process handles frontend and backend services. Therefore, isolation needs to be implemented. - -#### Procedure - -Run the **virsh emulatorpin** command to bind the QEMU main process to a physical CPU. - -- Check the range of the physical CPU bound to the QEMU process: - - ``` - # virsh emulatorpin openEulerVM - emulator: CPU Affinity - ---------------------------------- - *: 0-63 - ``` - - This indicates that the QEMU main process corresponding to VM **openEulerVM** can be scheduled on all physical CPUs of the host. - -- Online binding: Run the **vcpu emulatorpin** command with the **--live** parameter to modify the binding relationship between the QEMU process and the running VM. - - ``` - # virsh emulatorpin openEulerVM --live 2-3 - - # virsh emulatorpin openEulerVM - emulator: CPU Affinity - ---------------------------------- - *: 2-3 - ``` - - The preceding commands bind the QEMU process corresponding to VM **openEulerVM** to physical CPUs **2** and **3**. That is, the QEMU process is scheduled only on the two physical CPUs. The binding relationship takes effect immediately but becomes invalid after the VM is shut down and restarted. - -- Permanent binding: Run the **virsh emulatorpin** command with the **--config** parameter to modify the binding relationship between the VM and the QEMU process in the libvirt internal configuration. 
- - ``` - # virsh emulatorpin openEulerVM --config 0-3,^1 - - # virsh emulatorpin euler - emulator: CPU Affinity - ---------------------------------- - *: 0,2-3 - ``` - - The preceding commands bind the QEMU process corresponding to VM **openEulerVM** to physical CPUs **0**, **2** and **3**. That is, the QEMU process is scheduled only on the three physical CPUs. The modification of the binding relationship does not take effect immediately. Instead, the modification takes effect after the next startup of the VM and takes effect permanently. - - -### Adjusting the vCPU Binding Relationship - -#### Overview - -The vCPU of a VM is bound to a physical CPU. That is, the vCPU is scheduled only on the bound physical CPU to improve VM performance in specific scenarios. For example, in a NUMA system, vCPUs are bound to the same NUMA node to prevent cross-node memory access and VM performance deterioration. If the vCPU is not bound, by default, the vCPU can be scheduled on any physical CPU. The specific binding policy is determined by the user. - -#### Procedure - -Run the **virsh vcpupin** command to adjust the binding relationship between vCPUs and physical CPUs. - -- View the vCPU binding information of the VM. - - ``` - # virsh vcpupin openEulerVM - VCPU CPU Affinity - ---------------------- - 0 0-63 - 1 0-63 - 2 0-63 - 3 0-63 - ``` - - This indicates that all vCPUs of VM **openEulerVM** can be scheduled on all physical CPUs of the host. - -- Online adjustment: Run the **vcpu vcpupin** command with the **--live** parameter to modify the vCPU binding relationship of a running VM. - - ``` - # virsh vcpupin openEulerVM --live 0 2-3 - - # virsh vcpupin euler - VCPU CPU Affinity - ---------------------- - 0 2-3 - 1 0-63 - 2 0-63 - 3 0-63 - ``` - - The preceding commands bind vCPU **0** of VM **openEulerVM** to pCPU **2** and pCPU **3**. That is, vCPU **0** is scheduled only on the two physical CPUs. 
The binding relationship takes effect immediately but becomes invalid after the VM is shut down and restarted. - -- Permanent adjustment: Run the **virsh vcpupin** command with the **--config** parameter to modify the vCPU binding relationship of the VM in the libvirt internal configuration. - - ``` - # virsh vcpupin openEulerVM --config 0 0-3,^1 - - # virsh vcpupin openEulerVM - VCPU CPU Affinity - ---------------------- - 0 0,2-3 - 1 0-63 - 2 0-63 - 3 0-63 - ``` - - The preceding commands bind vCPU **0** of VM **openEulerVM** to physical CPUs **0**, **2**, and **3**. That is, vCPU **0** is scheduled only on the three physical CPUs. The modification of the binding relationship does not take effect immediately. Instead, the modification takes effect after the next startup of the VM and takes effect permanently. - -### CPU Hot Add - -#### Overview - -This feature allows users to hot add CPUs to a running VM without affecting its normal running. When the internal service pressure of a VM keeps increasing, all CPUs will be overloaded. To improve the computing capability of the VM, you can use the CPU hot add function to increase the number of CPUs on the VM without stopping it. - -#### Constraints - -- For processors using the AArch64 architecture, the specified VM chipset type \(machine\) needs to be virt-4.1 or a later version when a VM is created. For processors using the x86\_64 architecture, the specified VM chipset type \(machine\) needs to be pc-i440fx-1.5 or a later version when a VM is created. -- When configuring Guest NUMA, you need to configure the vCPUs that belong to the same socket in the same vNode. Otherwise, the VM may be soft locked up after the CPU is hot added, which may cause the VM panic. -- VMs do not support CPU hot add during migration, hibernation, wake-up, or snapshot. -- Whether the hot added CPU can automatically go online depends on the VM OS logic rather than the virtualization layer. 
-- CPU hot add is restricted by the maximum number of CPUs supported by the Hypervisor and GuestOS. -- When a VM is being started, stopped, or restarted, the hot added CPU may become invalid. However, the hot added CPU takes effect after the VM is restarted. -- During VM CPU hot add, if the number of added CPUs is not an integer multiple of the number of cores in the VM CPU topology configuration item, the CPU topology displayed in the VM may be disordered. You are advised to add CPUs whose number is an integer multiple of the number of cores each time. -- If the hot added CPU needs to take effect online and is still valid after the VM is restarted, the --config and --live options need to be transferred to the virsh setvcpus API to persist the hot added CPU. - -#### Procedure - -**VM XML Configuration** - -1. To use the CPU hot add function, configure the number of CPUs, the maximum number of CPUs supported by the VM, and the VM chipset type when creating the VM. (For the AArch64 architecture, the virt-4.1 or a later version is required. For the x86\_64 architecture, the pc-i440fx-1.5 or later version is required. The AArch64 VM is used as an example. The configuration template is as follows: - - ``` - - ... - n - - hvm - - ... - - ``` - - >![](./public_sys-resources/icon-note.gif) **Note** - >- The value of placement must be static. - >- m indicates the current number of CPUs on the VM, that is, the default number of CPUs after the VM is started. n indicates the maximum number of CPUs that can be hot added to a VM. The value cannot exceed the maximum CPU specifications supported by the Hypervisor or GuestOS. n is greater than or equal to m. - - For example, if the current number of CPUs of a VM is 4 and the maximum number of hot added CPUs is 64, the XML configuration is as follows: - - ``` - - …… - 64 - - hvm - - …… - ``` - - -**Hot Adding and Bringing CPUs Online** - -1. 
If the hot added CPU needs to be automatically brought online, create the udev rules file /etc/udev/rules.d/99-hotplug-cpu.rules in the VM as user root and define the udev rules in the file. The following is an example: - - ``` - ### automatically online hot-plugged cpu - ACTION=="add", SUBSYSTEM=="cpu", ATTR{online}="1" - ``` - - >![](./public_sys-resources/icon-note.gif) **Note** - >If you do not use the udev rules, you can use the root permission to manually bring the hot added CPU online by running the following command: - >``` - >for i in `grep -l 0 /sys/devices/system/cpu/cpu*/online` - >do - > echo 1 > $i - >done - >``` - -2. Use the virsh tool to hot add CPUs to the VM. For example, to set the number of CPUs after hot adding to 6 on the VM named openEulerVM and make the hot add take effect online, run the following command: - - ``` - virsh setvcpus openEulerVM 6 --live - ``` - - >![](./public_sys-resources/icon-note.gif) **Note** - >The format for running the virsh setvcpus command to hot add a VM CPU is as follows: - >``` - >virsh setvcpus [--config] [--live] - >``` - >- domain: Parameter, which is mandatory. Specifies the name of a VM. - >- count: Parameter, which is mandatory. Specifies the number of target CPUs, that is, the number of CPUs after hot adding. - >- --config: Option, which is optional. This parameter is still valid when the VM is restarted. - >- --live: Option, which is optional. The configuration takes effect online. - - -## Managing Virtual Memory - -### Introduction to NUMA - -Traditional multi-core computing uses the symmetric multi-processor \(SMP\) mode. Multiple processors are connected to a centralized memory and I/O bus. All processors can access only the same physical memory. Therefore, the SMP system is also referred to as a uniform memory access \(UMA\) system. Uniformity means that a processor can only maintain or share a unique value for each data record in memory at any time. 
Obviously, the disadvantage of SMP is its limited scalability, because when the memory and the I/O interface are saturated, adding a processor cannot obtain higher performance. - -The non-uniform memory access architecture \(NUMA\) is a distributed memory access mode. In this mode, a processor can access different memory addresses at the same time, which greatly improves concurrency. With this feature, a processor is divided into multiple nodes, each of which is allocated a piece of local memory space. The processors of all nodes can access all physical memories, but the time required for accessing the memory on the local node is much shorter than that on a remote node. - -### Configuring Host NUMA - -To improve VM performance, you can specify NUMA nodes for a VM using the VM XML configuration file before the VM is started so that the VM memory is allocated to the specified NUMA nodes. This feature is usually used together with the vCPU to prevent the vCPU from remotely accessing the memory. - -#### Procedure - -- Check the NUMA topology of the host. - - ``` - # numactl -H - available: 4 nodes (0-3) - node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - node 0 size: 31571 MB - node 0 free: 17095 MB - node 1 cpus: 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - node 1 size: 32190 MB - node 1 free: 28057 MB - node 2 cpus: 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 - node 2 size: 32190 MB - node 2 free: 10562 MB - node 3 cpus: 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 - node 3 size: 32188 MB - node 3 free: 272 MB - node distances: - node 0 1 2 3 - 0: 10 15 20 20 - 1: 15 10 20 20 - 2: 20 20 10 15 - 3: 20 20 15 10 - ``` - -- Add the **numatune** field to the VM XML configuration file to create and start the VM. 
For example, to allocate NUMA node 0 on the host to the VM, configure parameters as follows: - - ``` - - - - ``` - - If the vCPU of the VM is bound to the physical CPU of **node 0**, the performance deterioration caused by the vCPU accessing the remote memory can be avoided. - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >- The sum of memory allocated to the VM cannot exceed the remaining available memory of the NUMA node. Otherwise, the VM may fail to start. - >- You are advised to bind the VM memory and vCPU to the same NUMA node to avoid the performance deterioration caused by vCPU access to the remote memory. For example, bind the vCPU to NUMA node 0 as well. - - -### Configuring Guest NUMA - -Many service software running on VMs is optimized for the NUMA architecture, especially for large-scale VMs. openEuler provides the Guest NUMA feature to display the NUMA topology in VMs. You can identify the structure to optimize the performance of service software and ensure better service running. - -When configuring guest NUMA, you can specify the location of vNode memory on the host to implement memory block binding and vCPU binding so that the vCPU and memory on the vNode are on the same physical NUMA node. - -#### Procedure - -After Guest NUMA is configured in the VM XML configuration file, you can view the NUMA topology on the VM. **** is mandatory for Guest NUMA. - -``` - - - - - - - - - - - [...] - - - - - - -``` - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->- **** provides the NUMA topology function for VMs. **cell id** indicates the vNode ID, **cpus** indicates the vCPU ID, and **memory** indicates the memory size on the vNode. ->- If you want to use Guest NUMA to provide better performance, configure <**numatune\>** and **** so that the vCPU and memory are distributed on the same physical NUMA node. -> - **cellid** in **** corresponds to **cell id** in ****. 
**mode** can be set to **strict** \(apply for memory from a specified node strictly. If the memory is insufficient, the application fails.\), **preferred** \(apply for memory from a node first. If the memory is insufficient, apply for memory from another node\), or **interleave** \(apply for memory from a specified node in cross mode\); **nodeset** indicates the specified physical NUMA node. -> - In ****, you need to bind the vCPU in the same **cell id** to the physical NUMA node that is the same as the **memnode**. - - - -### Memory Hot Add - -#### Overview -In virtualization scenarios, the memory, CPU, and external devices of VMs are simulated by software. Therefore, the memory can be adjusted online for VMs at the virtualization bottom layer. In the current openEuler version, memory can be added to a VM online. If the physical memory of a VM is insufficient and the VM cannot be shut down, you can use this feature to add physical memory resources to the VM. - -#### Constraints - -- For processors using the AArch64 architecture, the specified VM chipset type \(machine\) needs to be virt-4.1 or a later version when a VM is created. For processors using the x86 architecture, the specified VM chipset type \(machine\) needs to be a later version than pc-i440fx-1.5 when a VM is created. -- Guest NUMA on which the memory hot add feature depends needs to be configured on the VM. Otherwise, the memory hot add process cannot be completed. -- When hot adding memory, you need to specify the ID of Guest NUMA node to which the new memory belongs. Otherwise, the memory hot add fails. -- The VM kernel should support memory hot add. Otherwise, the VM cannot identify the newly added memory or the memory cannot be brought online. -- For a VM that uses hugepages, the capacity of the hot added memory should be an integral multiple of hugepagesz. Otherwise, the hot add fails. 
-- The hot added memory size should be an integral multiple of the Guest physical memory block size (block\_size\_bytes). Otherwise, the VM cannot go online. The value of block\_size\_bytes can be obtained using the lsmem command in Guest. -- After n pieces of virtio-net NICs are configured, the maximum number of hot add times is set to min\{max\_slot, 64 - n\} to reserve slots for NICs. -- The vhost-user device and the memory hot add feature are mutually exclusive. A VM configured with the vhost-user device does not support memory hot add. After the memory is hot added to a VM, the vhost-user device cannot be hot added. -- If the VM OS is Linux, ensure that the initial memory is greater than or equal to 4 GB. -- If the VM OS is Windows, the first hot added memory needs to be specified to Guest NUMA node0. Otherwise, the hot added memory cannot be identified by the VM. -- In passthrough scenarios, memory needs to be allocated in advance. Therefore, it is normal that the startup and hot add of memory are slower than those of common VMs (especially large-specification VMs). -- It is recommended that the ratio of the available memory to the hot added memory be at least 1:32. That is, at least 1 GB available memory is required for the VM with 32 GB hot added memory. If the ratio is less than 1:32, the VM may be suspended. -- Whether the hot added memory can automatically go online depends on the VM OS logic. You can manually bring the memory online or configure the udev rules to automatically bring the memory online. - -#### Procedure - -**VM XML Configuration** - -1. To use the memory hot add function, configure the maximum hot add memory size and reserved slot number, and configure the Guest NUMA topology when creating a VM. - - For example, run the following command to configure 32 GB initial memory for a VM, reserve 256 slots, set the memory upper limit to 1 TB, and configure two NUMA nodes: - - ``` - - 32 - 1024 - - - - - - - - .... 
- ``` - - ->![](./public_sys-resources/icon-note.gif) **Note** ->In the preceding information, ->the value of slots in the maxMemory field indicates the reserved memory slots. The maximum value is 256. ->maxMemory indicates the maximum physical memory supported by the VM. ->For details about how to configure Guest NUMA, see "Configuring Guest NUMA." - -**Hot Adding and Bringing Memory Online** - -1. If the hot added memory needs to be automatically brought online, create the udev rules file /etc/udev/rules.d/99-hotplug-memory.rules in the VM as user root and define the udev rules in the file. The following is an example: - - ``` - ### automatically online hot-plugged memory - ACTION=="add", SUBSYSTEM=="memory", ATTR{state}="online" - ``` - -2. Create a memory description XML file based on the size of the memory to be hot added and the Guest NUMA node of the VM. - - For example, to hot add 1 GB memory to NUMA node0, run the following command: - - ``` - - - 1024 - 0 - - - ``` - -3. Run the virsh attach-device command to hot add memory to the VM. In the command, openEulerVM indicates the VM name, memory.xml indicates the description file of the hot added memory, and --live indicates that the hot added memory takes effect online. You can also run the --config command to persist the hot added memory to the VM XML file. 
- - ``` - ### virsh attach-device openEulerVM memory.xml --live - ``` - - >![](./public_sys-resources/icon-note.gif) **Note** - >If you do not use the udev rules, you can use the root permission to manually bring the hot added memory online by running the following command: - >``` - >for i in `grep -l offline /sys/devices/system/memory/memory*/state` - >do - > echo online > $i - >done - >``` - - diff --git a/docs/en/docs/Virtualization/tool-guide.md b/docs/en/docs/Virtualization/tool-guide.md deleted file mode 100644 index d22f203deaa05c79cb05eebb48ef455c5b2dce8f..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/tool-guide.md +++ /dev/null @@ -1,140 +0,0 @@ -# Tool Guide - -- [vmtop](#vmtop) - -## vmtop - -### Overview -vmtop is a user-mode tool running on the host machine. You can use the vmtop tool to dynamically view the usage of VM resources in real time, such as CPU usage, memory usage, and the number of vCPU traps. Therefore, the vmtop tool can be used to locate virtualization problems and optimize performance. 
- -The vmtop monitoring items are as follows (sampling difference: difference between two data obtained at a specified interval): -- VM/task-name: VM/Process name -- DID: VM ID -- PID: PID of the qemu process of the VM -- %CPU: CPU usage of a process -- EXThvc: Number of hvc-exits (sampling difference) -- EXTwfe: Number of wfe-exits (sampling difference) -- EXTwfi: Number of wfi-exits (sampling difference) -- EXTmmioU: Number of mmioU-exits (sampling difference) -- EXTmmioK: Number of mmioK-exits (sampling difference) -- EXTfp: Number of fp-exits (sampling difference) -- EXTirq: Number of irq-exits (sampling difference) -- EXTsys64: Number of sys64 exits (sampling difference) -- EXTmabt: Number of mem abort exits (sampling difference) -- EXTsum: Total number of KVM exits (sampling difference) -- S: Process status -- P: Physical CPU usage of a process -- %ST: Ratio of the preemption time to the CPU running time (KVM data) -- %GUE: Ratio of the VM internal occupation time to the CPU running time (KVM data) -- %HYP: Virtualization overhead ratio (KVM data) - -### Usage -vmtop is a command line tool. You can directly run the vmtop in command line mode. -In addition, the vmtop tool provides different options for querying different information. - -#### Syntax -```sh -vmtop [option] -``` - -#### Option Description -- -d: sets the refresh interval, in seconds. -- -H: displays the VM thread information. -- -n: sets the number of refresh times and exits after the refresh is complete. -- -b: displays Batch mode, which can be used to redirect to a file. -- -h: displays help information. -- -v: displays versions. - -#### Keyboard Shortcut -Shortcut key used when the vmtop is running. -- H: displays or stops the VM thread information. The information is displayed by default. -- up/down: moves the VM list upwards or downwards. -- left/right: moves the cursor leftwards or rightwards to display the columns that are hidden due to the screen width. 
-- f: enters the editing mode of a monitoring item and selects the monitoring item to be enabled. -- q: exits the vmtop process. - -### Example -Run the vmtop command on the host. -```sh -vmtop -``` -The command output is as follows: -```sh -vmtop - 2020-09-14 09:54:48 - 1.0 -Domains: 1 running - - DID VM/task-name PID %CPU EXThvc EXTwfe EXTwfi EXTmmioU EXTmmioK EXTfp EXTirq EXTsys64 EXTmabt EXTsum S P %ST %GUE %HYP - 2 example 4054916 13.0 0 0 1206 10 0 144 62 174 0 1452 S 106 0.0 99.7 16.0 -``` -As shown in the output, there is only one VM named "example" on the host. The ID is 2. The CPU usage is 13.0%. The total number of traps within one second is 1452. The physical CPU occupied by the VM process is CPU 106. The ratio of the VM internal occupation time to the CPU running time is 99.7%. - -1. Display VM thread information. -Press H to display the thread information. -```sh -vmtop - 2020-09-14 10:11:27 - 1.0 -Domains: 1 running - - DID VM/task-name PID %CPU EXThvc EXTwfe EXTwfi EXTmmioU EXTmmioK EXTfp EXTirq EXTsys64 EXTmabt EXTsum S P %ST %GUE %HYP - 2 example 4054916 13.0 0 0 1191 17 4 120 76 147 0 1435 S 119 0.0 123.7 4.0 - |_ qemu-kvm 4054916 0.0 0 0 0 0 0 0 0 0 0 0 S 119 0.0 0.0 0.0 - |_ qemu-kvm 4054928 0.0 0 0 0 0 0 0 0 0 0 0 S 119 0.0 0.0 0.0 - |_ signalfd_com 4054929 0.0 0 0 0 0 0 0 0 0 0 0 S 120 0.0 0.0 0.0 - |_ IO mon_iothr 4054932 0.0 0 0 0 0 0 0 0 0 0 0 S 117 0.0 0.0 0.0 - |_ CPU 0/KVM 4054933 3.0 0 0 280 6 4 28 19 41 0 350 S 105 0.0 27.9 0.0 - |_ CPU 1/KVM 4054934 3.0 0 0 260 0 0 16 12 36 0 308 S 31 0.0 20.0 0.0 - |_ CPU 2/KVM 4054935 3.0 0 0 341 0 0 44 20 26 0 387 R 108 0.0 27.9 4.0 - |_ CPU 3/KVM 4054936 5.0 0 0 310 11 0 32 25 44 0 390 S 103 0.0 47.9 0.0 - |_ memory_lock 4054940 0.0 0 0 0 0 0 0 0 0 0 0 S 126 0.0 0.0 0.0 - |_ vnc_worker 4054944 0.0 0 0 0 0 0 0 0 0 0 0 S 118 0.0 0.0 0.0 - |_ worker 4143738 0.0 0 0 0 0 0 0 0 0 0 0 S 120 0.0 0.0 0.0 -``` -The example VM has 11 threads, including the vCPU thread, vnc_worker, and IO mon_iotreads. 
Each thread also displays detailed CPU usage and trap information. - -2. Select the monitoring item. -Enter f to edit the monitoring item. -```sh -field filter - select which field to be showed -Use up/down to navigate, use space to set whether chosen filed to be showed -'q' to quit to normal display - - * DID - * VM/task-name - * PID - * %CPU - * EXThvc - * EXTwfe - * EXTwfi - * EXTmmioU - * EXTmmioK - * EXTfp - * EXTirq - * EXTsys64 - * EXTmabt - * EXTsum - * S - * P - * %ST - * %GUE - * %HYP -``` -By default, all monitoring items are displayed. You can press the up or down key to select a monitoring item. Press the space bar to set the monitoring item, and press q to exit. -After %ST, %GUE, and %HYP are hidden, the following information is displayed: -```sh -vmtop - 2020-09-14 10:23:25 - 1.0 -Domains: 1 running - - DID VM/task-name PID %CPU EXThvc EXTwfe EXTwfi EXTmmioU EXTmmioK EXTfp EXTirq EXTsys64 EXTmabt EXTsum S P - 2 example 4054916 12.0 0 0 1213 14 1 144 68 168 0 1464 S 125 - |_ qemu-kvm 4054916 0.0 0 0 0 0 0 0 0 0 0 0 S 125 - |_ qemu-kvm 4054928 0.0 0 0 0 0 0 0 0 0 0 0 S 119 - |_ signalfd_com 4054929 0.0 0 0 0 0 0 0 0 0 0 0 S 120 - |_ IO mon_iothr 4054932 0.0 0 0 0 0 0 0 0 0 0 0 S 117 - |_ CPU 0/KVM 4054933 2.0 0 0 303 6 0 29 10 35 0 354 S 98 - |_ CPU 1/KVM 4054934 4.0 0 0 279 0 0 39 17 49 0 345 S 1 - |_ CPU 2/KVM 4054935 3.0 0 0 283 0 0 33 20 40 0 343 S 122 - |_ CPU 3/KVM 4054936 3.0 0 0 348 8 1 43 21 44 0 422 S 110 - |_ memory_lock 4054940 0.0 0 0 0 0 0 0 0 0 0 0 S 126 - |_ vnc_worker 4054944 0.0 0 0 0 0 0 0 0 0 0 0 S 118 - |_ worker 1794 0.0 0 0 0 0 0 0 0 0 0 0 S 126 -``` -%ST, %GUE, and %HYP will not be displayed on the screen. 
diff --git a/docs/en/docs/Virtualization/user-and-administrator-guide.md b/docs/en/docs/Virtualization/user-and-administrator-guide.md deleted file mode 100644 index 47646f59b84e0a9b7e9952054286eba74f6ebdd1..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/user-and-administrator-guide.md +++ /dev/null @@ -1,437 +0,0 @@ -# User and Administrator Guide - -This chapter describes how to create VMs on the virtualization platform, manage VM life cycles, and query information. - - - -- [Best Practices](#best-practices) - - [Performance Best Practices](#performance-best-practices) - - [Halt-Polling](#halt-polling) - - [I/O Thread Configuration](#i-o-thread-configuration) - - [Raw Device Mapping](#raw-device-mapping) - - [kworker Isolation and Binding](#kworker-isolation-and-binding) - - [HugePage Memory](#hugepage-memory) - - [Security Best Practices](#security-best-practices) - - [Libvirt Authentication](#libvirt-authentication) - - [qemu-ga](#qemu-ga) - - [sVirt Protection](#svirt-protection) - - -## Best Practices - -### Performance Best Practices -#### Halt-Polling - -##### Overview - -If compute resources are sufficient, the halt-polling feature can be used to enable VMs to obtain performance similar to that of physical machines. If the halt-polling feature is not enabled, the host allocates CPU resources to other processes when the vCPU exits due to idle timeout. When the halt-polling feature is enabled on the host, the vCPU of the VM performs polling when it is idle. The polling duration depends on the actual configuration. If the vCPU is woken up during the polling, the vCPU can continue to run without being scheduled from the host. This reduces the scheduling overhead and improves the VM system performance. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The halt-polling mechanism ensures that the vCPU thread of the VM responds in a timely manner. However, when the VM has no load, the host also performs polling. 
As a result, the host detects that the CPU usage of the vCPU is high, but the actual CPU usage of the VM is not high. - -##### Instructions - -The halt-polling feature is enabled by default. You can dynamically change the halt-polling time of vCPU by modifying the **halt\_poll\_ns** file. The default value is **500000**, in ns. - -For example, to set the polling duration to 400,000 ns, run the following command: - -``` -# echo 400000 > /sys/module/kvm/parameters/halt_poll_ns -``` - -#### I/O Thread Configuration - -##### Overview - -By default, QEMU main threads handle backend VM read and write operations on the KVM. This causes the following issues: - -- VM I/O requests are processed by a QEMU main thread. Therefore, the single-thread CPU usage becomes the bottleneck of VM I/O performance. -- The QEMU global lock \(qemu\_global\_mutex\) is used when VM I/O requests are processed by the QEMU main thread. If the I/O processing takes a long time, the QEMU main thread will occupy the global lock for a long time. As a result, the VM vCPU cannot be scheduled properly, affecting the overall VM performance and user experience. - -You can configure the I/O thread attribute for the virtio-blk disk or virtio-scsi controller. At the QEMU backend, an I/O thread is used to process read and write requests of a virtual disk. The mapping relationship between the I/O thread and the virtio-blk disk or virtio-scsi controller can be a one-to-one relationship to minimize the impact on the QEMU main thread, enhance the overall I/O performance of the VM, and improve user experience. - -##### Configuration Description - -To use I/O threads to process VM disk read and write requests, you need to modify VM configurations as follows: - -- Configure the total number of high-performance virtual disks on the VM. For example, set **** to **4** to control the total number of I/O threads. 
- - ``` - - VMName - 4194304 - 4194304 - 4 - 4 - ``` - -- Configure the I/O thread attribute for the virtio-blk disk. **** indicates I/O thread IDs. The IDs start from 1 and each ID must be unique. The maximum ID is the value of ****. For example, to allocate I/O thread 2 to the virtio-blk disk, set parameters as follows: - - ``` - - - - -
- - ``` - -- Configure the I/O thread attribute for the virtio-scsi controller. For example, to allocate I/O thread 2 to the virtio-scsi controller, set parameters as follows: - - ``` - - - -
- - ``` - -- Bind I/O threads to a physical CPU. - - Binding I/O threads to specified physical CPUs does not affect the resource usage of vCPU threads. **** indicates I/O thread IDs, and **** indicates IDs of the bound physical CPUs. - - ``` - - - - - ``` - - -#### Raw Device Mapping - -##### Overview - -When configuring VM storage devices, you can use configuration files to configure virtual disks for VMs, or connect block devices \(such as physical LUNs and LVs\) to VMs for use to improve storage performance. The latter configuration method is called raw device mapping \(RDM\). Through RDM, a virtual disk is presented as a small computer system interface \(SCSI\) device to the VM and supports most SCSI commands. - -RDM can be classified into virtual RDM and physical RDM based on backend implementation features. Compared with virtual RDM, physical RDM provides better performance and more SCSI commands. However, for physical RDM, the entire SCSI disk needs to be mounted to a VM for use. If partitions or logical volumes are used for configuration, the VM cannot identify the disk. - -##### Configuration Example - -VM configuration files need to be modified for RDM. The following is a configuration example. - -- Virtual RDM - - The following is an example of mounting the SCSI disk **/dev/sdc** on the host to the VM as a virtual raw device: - - ``` - - - ... - - - - - -
- - ... - - - ``` - - -- Physical RDM - - The following is an example of mounting the SCSI disk **/dev/sdc** on the host to the VM as a physical raw device: - - ``` - - - ... - - - - - -
- - ... - - - ``` - - -#### kworker Isolation and Binding - -##### Overview - -kworker is a per-CPU thread implemented by the Linux kernel. It is used to execute workqueue requests in the system. kworker threads will compete for physical core resources with vCPU threads, resulting in virtualization service performance jitter. To ensure that the VM can run stably and reduce the interference of kworker threads on the VM, you can bind kworker threads on the host to a specific CPU. - -##### Instructions - -You can modify the **/sys/devices/virtual/workqueue/cpumask** file to bind tasks in the workqueue to the CPU specified by **cpumasks**. Masks in **cpumask** are in hexadecimal format. For example, if you need to bind kworker to CPU0 to CPU7, run the following command to change the mask to **ff**: - -``` -# echo ff > /sys/devices/virtual/workqueue/cpumask -``` - -#### HugePage Memory - -##### Overview - -Compared with traditional 4 KB memory paging, openEuler also supports 2 MB/1 GB memory paging. HugePage memory can effectively reduce TLB misses and significantly improve the performance of memory-intensive services. openEuler uses two technologies to implement HugePage memory. - -- Static HugePages - - The static HugePage requires that a static HugePage pool be reserved before the host OS is loaded. When creating a VM, you can modify the XML configuration file to specify that the VM memory is allocated from the static HugePage pool. The static HugePage ensures that all memory of a VM exists on the host as the HugePage to ensure physical continuity. However, the deployment difficulty is increased. After the page size of the static HugePage pool is changed, the host needs to be restarted for the change to take effect. The size of a static HugePage can be 2 MB or 1 GB. - - -- THP - - If the transparent HugePage \(THP\) mode is enabled, the VM automatically selects available 2 MB consecutive pages and automatically splits and combines HugePages when allocating memory. 
When no 2 MB consecutive pages are available, the VM selects available 64 KB \(AArch64 architecture\) or 4 KB \(x86\_64 architecture\) pages for allocation. By using THP, users do not need to be aware of it and 2 MB HugePages can be used to improve memory access performance. - - -If VMs use static HugePages, you can disable THP to reduce the overhead of the host OS and ensure stable VM performance. - -##### Instructions - -- Configure static HugePages. - - Before creating a VM, modify the XML file to configure a static HugePage for the VM. - - ``` - - - - - - ``` - - The preceding XML segment indicates that a 1 GB static HugePage is configured for the VM. - - ``` - - - - - - ``` - - The preceding XML segment indicates that a 2 MB static HugePage is configured for the VM. - -- Configure transparent HugePage. - - Dynamically enable the THP through sysfs. - - ``` - # echo always > /sys/kernel/mm/transparent_hugepage/enabled - ``` - - Dynamically disable the THP. - - ``` - # echo never > /sys/kernel/mm/transparent_hugepage/enabled - ``` - - -### security Best Practices - -#### Libvirt Authentication - -##### Overview - -When a user uses libvirt remote invocation but no authentication is performed, any third-party program that connects to the host's network can operate VMs through the libvirt remote invocation mechanism. This poses security risks. To improve system security, openEuler provides the libvirt authentication function. That is, users can remotely invoke a VM through libvirt only after identity authentication. Only specified users can access the VM, thereby protecting VMs on the network. - -##### Enabling Libvirt Authentication - -By default, the libvirt remote invocation function is disabled on openEuler. This following describes how to enable the libvirt remote invocation and libvirt authentication functions. - -1. Log in to the host. -2. 
Modify the libvirt service configuration file **/etc/libvirt/libvirtd.conf** to enable the libvirt remote invocation and libvirt authentication functions. For example, to enable the TCP remote invocation that is based on the Simple Authentication and Security Layer \(SASL\) framework, configure parameters by referring to the following: - - ``` - #Transport layer security protocol. The value 0 indicates that the protocol is disabled, and the value 1 indicates that the protocol is enabled. You can set the value as needed. - listen_tls = 0 - #Enable the TCP remote invocation. To enable the libvirt remote invocation and libvirt authentication functions, set the value to 1. - listen_tcp = 1 - #User-defined protocol configuration for TCP remote invocation. The following uses sasl as an example. - auth_tcp = "sasl" - ``` - -3. Modify the **/etc/sasl2/libvirt.conf** configuration file to set the SASL mechanism and SASLDB. - - ``` - #Authentication mechanism of the SASL framework. - mech_list: digest-md5 - #Database for storing usernames and passwords - sasldb_path: /etc/libvirt/passwd.db - ``` - -4. Add the user for SASL authentication and set the password. Take the user **userName** as an example. The command is as follows: - - ``` - # saslpasswd2 -a libvirt userName - Password: - Again (for verification): - ``` - -5. Modify the **/etc/sysconfig/libvirtd** configuration file to enable the libvirt listening option. - - ``` - LIBVIRTD_ARGS="--listen" - ``` - -6. Restart the libvirtd service to make the modification to take effect. - - ``` - # systemctl restart libvirtd - ``` - -7. Check whether the authentication function for libvirt remote invocation takes effect. Enter the username and password as prompted. If the libvirt service is successfully connected, the function is successfully enabled. 
- - ``` - # virsh -c qemu+tcp://192.168.0.1/system - Please enter your authentication name: openeuler - Please enter your password: - Welcome to virsh, the virtualization interactive terminal. - - Type: 'help' for help with commands - 'quit' to quit - - virsh # - ``` - - -##### Managing SASL - -The following describes how to manage SASL users. - -- Query an existing user in the database. - - ``` - # sasldblistusers2 -f /etc/libvirt/passwd.db - user@localhost.localdomain: userPassword - ``` - -- Delete a user from the database. - - ``` - # saslpasswd2 -a libvirt -d user - ``` - - -#### qemu-ga - -##### Overview - -QEMU guest agent \(qemu-ga\) is a daemon running within VMs. It allows users on a host OS to perform various management operations on the guest OS through outband channels provided by QEMU. The operations include file operations \(open, read, write, close, seek, and flush\), internal shutdown, VM suspend \(suspend-disk, suspend-ram, and suspend-hybrid\), and obtaining of VM internal information \(including the memory, CPU, NIC, and OS information\). - -In some scenarios with high security requirements, qemu-ga provides the blacklist function to prevent internal information leakage of VMs. You can use a blacklist to selectively shield some functions provided by qemu-ga. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The qemu-ga installation package is **qemu-guest-agent-**_xx_**.rpm**. It is not installed on openEuler by default. _xx_ indicates the actual version number. - -##### Procedure - -To add a qemu-ga blacklist, perform the following steps: - -1. Log in to the VM and ensure that the qemu-guest-agent service exists and is running. - - ``` - # systemctl status qemu-guest-agent |grep Active - Active: active (running) since Wed 2018-03-28 08:17:33 CST; 9h ago - ``` - -2. Query which **qemu-ga** commands can be added to the blacklist: - - ``` - # qemu-ga --blacklist ? 
- guest-sync-delimited - guest-sync - guest-ping - guest-get-time - guest-set-time - guest-info - ... - ``` - - -1. Set the blacklist. Add the commands to be shielded to **--blacklist** in the **/usr/lib/systemd/system/qemu-guest-agent.service** file. Use spaces to separate different commands. For example, to add the **guest-file-open** and **guest-file-close** commands to the blacklist, configure the file by referring to the following: - - ``` - [Service] - ExecStart=-/usr/bin/qemu-ga \ - --blacklist=guest-file-open guest-file-close - ``` - - -1. Restart the qemu-guest-agent service. - - ``` - # systemctl daemon-reload - # systemctl restart qemu-guest-agent - ``` - -2. Check whether the qemu-ga blacklist function takes effect on the VM, that is, whether the **--blacklist** parameter configured for the qemu-ga process is correct. - - ``` - # ps -ef|grep qemu-ga|grep -E "blacklist=|b=" - root 727 1 0 08:17 ? 00:00:00 /usr/bin/qemu-ga --method=virtio-serial --path=/dev/virtio-ports/org.qemu.guest_agent.0 --blacklist=guest-file-open guest-file-close guest-file-read guest-file-write guest-file-seek guest-file-flush -F/etc/qemu-ga/fsfreeze-hook - ``` - - >![](./public_sys-resources/icon-note.gif) **NOTE:** - >For more information about qemu-ga, visit [https://wiki.qemu.org/Features/GuestAgent](https://wiki.qemu.org/Features/GuestAgent). - - -#### sVirt Protection - -##### Overview - -In a virtualization environment that uses the discretionary access control \(DAC\) policy only, malicious VMs running on hosts may attack the hypervisor or other VMs. To improve security in virtualization scenarios, openEuler uses sVirt for protection. sVirt is a security protection technology based on SELinux. It is applicable to KVM virtualization scenarios. A VM is a common process on the host OS. In the hypervisor, the sVirt mechanism labels QEMU processes corresponding to VMs with SELinux labels. 
In addition to types which are used to label virtualization processes and files, different categories are used to label different VMs. Each VM can access only file devices of the same category. This prevents VMs from accessing files and devices on unauthorized hosts or other VMs, thereby preventing VM escape and improving host and VM security. - -##### Enabling sVirt Protection - -1. Enable SELinux on the host. - 1. Log in to the host. - 2. Enable the SELinux function on the host. - 1. Modify the system startup parameter file **grub.cfg** to set **selinux** to **1**. - - ``` - selinux=1 - ``` - - 2. Modify **/etc/selinux/config** to set the **SELINUX** to **enforcing**. - - ``` - SELINUX=enforcing - ``` - - 3. Restart the host. - - ``` - # reboot - ``` - - - -1. Create a VM where the sVirt function is enabled. - 1. Add the following information to the VM configuration file: - - ``` - - ``` - - Or check whether the following configuration exists in the file: - - ``` - - ``` - - 2. Create a VM. - - ``` - # virsh define openEulerVM.xml - ``` - -2. Check whether sVirt is enabled. - - Run the following command to check whether sVirt protection has been enabled for the QEMU process of the running VM. If **svirt\_t:s0:c** exists, sVirt protection has been enabled. - - ``` - # ps -eZ|grep qemu |grep "svirt_t:s0:c" - system_u:system_r:svirt_t:s0:c200,c947 11359 ? 00:03:59 qemu-kvm - system_u:system_r:svirt_t:s0:c427,c670 13790 ? 19:02:07 qemu-kvm - ``` - - diff --git a/docs/en/docs/Virtualization/virtualization.md b/docs/en/docs/Virtualization/virtualization.md deleted file mode 100644 index a1f5afec48b25bdc09791f92062192aa24e277f1..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/virtualization.md +++ /dev/null @@ -1,3 +0,0 @@ -# Virtualization User Guide - -This document describes virtualization, installation method and usage of openEuler-based virtualization, and guidance for users and administrators to install and use virtualization. 
diff --git a/docs/en/docs/Virtualization/vm-configuration.md b/docs/en/docs/Virtualization/vm-configuration.md deleted file mode 100644 index e42e9ad02c921e04dec6a15df3af9f24b8736777..0000000000000000000000000000000000000000 --- a/docs/en/docs/Virtualization/vm-configuration.md +++ /dev/null @@ -1,877 +0,0 @@ -# VM Configuration - -- [VM Configuration](#vm-configuration) - - [Introduction](#introduction) - - [VM Description](#vm-description) - - [vCPU and Virtual Memory](#vcpu-and-virtual-memory) - - [Virtual Device Configuration](#virtual-device-configuration) - - [Storage Devices](#storage-devices) - - [Network Device](#network-device) - - [Bus Configuration](#bus-configuration) - - [Other Common Devices](#other-common-devices) - - [Configurations Related to the System Architecture](#configurations-related-to-the-system-architecture) - - [Other Common Configuration Items](#other-common-configuration-items) - - [XML Configuration File Example](#xml-configuration-file-example) - -## Introduction - -### Overview - -Libvirt tool uses XML files to describe a VM feature, including the VM name, CPU, memory, disk, NIC, mouse, and keyboard. You can manage a VM by modifying configuration files. This section describes the elements in the XML configuration file to help users configure VMs. - -### Format - -The VM XML configuration file uses domain as the root element, which contains multiple other elements. Some elements in the XML configuration file can contain corresponding attributes and attribute values to describe VM information in detail. Different attributes of the same element are separated by spaces. - -The basic format of the XML configuration file is as follows. In the format, **label** indicates the label name, **attribute** indicates the attribute, and **value** indicates the attribute value. Change them based on the site requirements. - -``` - - VMName - 8 - 4 - - - - - -``` - -### Process - -1. Create an XML configuration file with domain root element. -2. 
Use the name tag to specify a unique VM name based on the naming rule. -3. Configure system resources such as the virtual CPU \(vCPU\) and virtual memory. -4. Configure virtual devices. - 1. Configure storage devices. - 2. Configure network devices. - 3. Configure the external bus structure. - 4. Configure external devices such as the mouse. - -5. Save the XML configuration file. - -## VM Description - -### Overview - -This section describes how to configure the VM **domain** root element and VM name. - -### Elements - -- **domain**: Root element of a VM XML configuration file, which is used to configure the type of the hypervisor that runs the VM. - - **type**: Type of a domain in virtualization. In the openEuler virtualization, the attribute value is **kvm**. - -- **name**: VM name. - - The VM name is a unique character string on the same host. The VM name can contain only digits, letters, underscores \(\_\), hyphens \(-\), and colons \(:\), but cannot contain only digits. The VM name can contain a maximum of 64 characters. - - -### Configuration Example - -For example, if the VM name is **openEuler**, the configuration is as follows: - -``` - - openEuler - ... - -``` - -## vCPU and Virtual Memory - -### Overview - -This section describes how to configure the vCPU and virtual memory. - -### Elements - -- **vcpu**: The number of virtual processors. -- **memory**: The size of the virtual memory. - - **unit**: The memory unit. The value can be KiB \(210 bytes\), MiB \(220 bytes\), GiB \(230 bytes\), or TiB \(240 bytes\). - -- **cpu**: The mode of the virtual processor. - - **mode**: The mode of the vCPU. - - - **host-passthrough**: indicates that the architecture and features of the virtual CPU are the same as those of the host. - - - **custom**: indicates that the architecture and features of the virtual CPU are configured by the **cpu** element. - - Sub-element **topology**: A sub-element of the element cpu, used to describe the topology structure of a vCPU mode. 
- - - The attributes **socket**, **cores**, and **threads** of the sub-element topology describe the number of CPU sockets of a VM, the number of processor cores included in each CPU socket, and the number of threads included in each processor core, respectively. The attribute value is a positive integer, and the product of the three values equals the number of vCPUs. - - The ARM architecture supports the virtual hyper-threading function. The virtual CPU hot add and the virtual hyper-threading function are mutually exclusive. - Sub-element **model**: A sub-element of the element cpu, used to describe the CPU model when **mode** is custom. - - Sub-element **feature**: A sub-element of the element cpu, used to enable/disable a CPU feature when **mode** is custom. The attribute **name** describes the name of the CPU feature. And whether enable the CPU feature is controlled by the attribute **policy**: - - - **force**: force enable the CPU feature regardless of it being supported by host CPU. - - - **require**: enable the CPU feature. - - - **optional**: the CPU feature will be enabled if and only if it is supported by host CPU. - - - **disable**: disable the CPU feature. - - - **forbid**: disable the CPU feature and guest creation will fail if the feature is supported by host CPU. - - -### Configuration Example - -For example, if the number of vCPUs is 4, the processing mode is host-passthrough, the virtual memory is 8 GiB, the four CPUs are distributed in two CPU sockets, and hyperthreading is not supported, the configuration is as follows: - -``` - - ... - 4 - 8 - - - -... - -``` - -If the virtual memory is 8 GiB, the number of vCPUs is 4, the processing mode is custom, the CPU model is Kunpeng-920, and pmull is disabled, the configuration is as follows: - -``` - - ... - 4 - 8 - - Kunpeng-920 - - - ... 
- -``` - -## Virtual Device Configuration - -The VM XML configuration file uses the **devices** elements to configure virtual devices, including storage devices, network devices, buses, and mouse devices. This section describes how to configure common virtual devices. - -### Storage Devices - -#### Overview - -This section describes how to configure virtual storage devices, including floppy disks, disks, and CD-ROMs and their storage types. - -#### Elements - -The XML configuration file uses the **disk** element to configure storage devices. [Table 1](#table14200183410353) describes common **disk** attributes. [Table 2](#table4866134925114) describes common subelements and their attributes. - -**Table 1** Common attributes of the **disk** element - - - - - - - - - - - - - - - - - - -

Element

-

Attribute

-

Description

-

Attribute Value and Description

-

disk

-

type

-

Specifies the type of the backend storage medium.

-

block: block device

-

file: file device

-

dir: directory path

-

device

-

Specifies the storage medium to be presented to the VM.

-

disk: disk (default)

-

floppy: floppy disk

-

cdrom: CD-ROM

-
- -**Table 2** Common subelements and attributes of the **disk** element - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Subelement

-

Subelement Description

-

Attribute Description

-

source

-

Specifies the backend storage medium, which corresponds to the type specified by the type attribute of the disk element.

-

file: file type. The value is the fully qualified path of the corresponding file.

-

dev: block type. The value is the fully qualified path of the corresponding host device.

-

dir: directory type. The value is the fully qualified path of the disk directory.

-

driver

-

Details about the specified backend driver

-

type: disk format type. The value can be raw or qcow2, which must be the same as that of source.

-

io: disk I/O mode. The options are native and threads.

-

cache: disk cache mode. The value can be none, writethrough, writeback, or directsync.

-

iothread: I/O thread allocated to the disk.

-

target

-

The bus and device that a disk presents to a VM.

-

dev: specifies the logical device name of a disk, for example, sd[a-p] for SCSI, SATA, and USB buses and hd[a-d] for IDE disks.

-

bus: specifies the type of a disk. Common types include scsi, usb, sata, and virtio.

-

boot

-

The disk can be used as the boot disk.

-

order: specifies the disk startup sequence.

-

readonly

-

The disk is read-only and cannot be modified by the VM. Generally, it is used together with the CD-ROM drive.

-

-

-
- -#### Configuration Example - -After the VM image is prepared according to [Preparing a VM Image](#preparing-a-vm-image), you can use the following XML configuration file to configure the virtual disk for the VM. - -In this example, two I/O threads, one block disk device and one CD, are configured for the VM, and the first I/O thread is allocated to the block disk device for use. The backend medium of the disk device is in qcow2 format and is used as the preferred boot disk. - -``` - - ... - 2 - - - - - - - - - - - - - - - ... - - -``` - -### Network Device - -#### Overview - -The XML configuration file can be used to configure virtual network devices, including the ethernet mode, bridge mode, and vhostuser mode. This section describes how to configure vNICs. - -#### Elements - -In the XML configuration file, the element **interface** is used, and its attribute **type** indicates the mode of the vNIC. The options are **ethernet**, **bridge**, and **vhostuser**. The following uses the vNIC in bridge mode as an example to describe its subelements and attributes. - -**Table 1** Common subelements of a vNIC in bridge mode - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Subelement

-

Subelement Description

-

Attribute and Description

-

mac

-

The MAC address of the vNIC.

-

address: specifies the MAC address. If this parameter is not set, the system automatically generates a MAC address.

-

target

-

Name of the backend vNIC.

-

dev: name of the created backend tap device.

-

source

-

Specifies the backend of the vNIC.

-

bridge: used together with the bridge mode. The value is the bridge name.

-

boot

-

The NIC can be used for remote startup.

-

order: specifies the startup sequence of NICs.

-

model

-

Indicates the type of a vNIC.

-

type: virtio is usually used for the NIC in bridge mode.

-

virtualport

-

Port type

-

type: If an OVS bridge is used, set this parameter to openvswitch.

-

driver

-

Backend driver type

-

name: driver name. The value is vhost.

-

queues: the number of NIC queues.

-
- -#### Configuration Example - -- After creating the Linux bridge br0 by referring to [Preparing the VM Network](#preparing-the-vm-network), configure a vNIC of the VirtIO type bridged on the br0 bridge. The corresponding XML configuration is as follows: - - ``` - - ... - - - - - - ... - - - ``` - -- If an OVS network bridge is created according to [Preparing the VM Network](#preparing-the-vm-network), configure a VirtIO vNIC device that uses the vhost driver and has four queues. - - ``` - - ... - - - - - - - - ... - - - ``` - - -### Bus Configuration - -#### Overview - -The bus is a channel for information communication between components of a computer. An external device needs to be mounted to a corresponding bus, and each device is assigned a unique address \(specified by the subelement **address**\). Information exchange with another device or a central processing unit \(CPU\) is completed through the bus network. Common device buses include the ISA bus, PCI bus, USB bus, SCSI bus, and PCIe bus. - -The PCIe bus is a typical tree structure and has good scalability. The buses are associated with each other by using a controller. The following uses the PCIe bus as an example to describe how to configure a bus topology for a VM. - ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->The bus configuration is complex. If the device topology does not need to be precisely controlled, the default bus configuration automatically generated by libvirt can be used. - -#### Elements - -In the XML configuration of libvirt, each controller element \(**controller**\) represents a bus, and one or more controllers or devices can be mounted to one controller depending on the VM architecture. This topic describes common attributes and subelements. - -**controller**: controller element, which indicates a bus. - -- Attribute **type**: bus type, which is mandatory for the controller. The common values are **pci**, **usb**, **scsi**, **virtio-serial**, **fdc**, and **ccid**. 
-- Attribute **index**: bus number of the controller \(the number starts from 0\), which is mandatory for the controller. This attribute can be used in the **address** element. -- Attribute **model**: specific model of the controller, which is mandatory for the controller. The available values are related to the value of **type**. For details about the mapping and description, see [Table 1](#table191911761111). -- Subelement **address**: mount location of a device or controller on the bus network. - - Attribute **type**: device address type. The common values are **pci**, **usb**, or **drive**. The attribute varies according to the **type** of the **address**. For details about the common **type** attribute value and the corresponding **address** attribute, see [Table 2](#table1200165711314). - -- Subelement **model**: name of a controller model. - - Attribute **name**: name of a controller model, which corresponds to the **model** attribute in the parent element controller. - - -**Table 1** Mapping between the common values of **type** and **model** for the controller. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Value of Type

-

Value of Model

-

Introduction

-

pci

-

pcie-root

-

PCIe root node, which can be used to mount PCIe devices or controllers.

-

pcie-root-port

-

Only one slot can be used to mount a PCIe device or controller.

-

pcie-to-pci-bridge

-

PCIe-to-PCI bridge controller, which can be used to mount PCI devices.

-

usb

-

ehci

-

USB 2.0 controller, which can be used to mount USB 2.0 devices.

-

nec-xhci

-

USB 3.0 controller, which can be used to mount USB 3.0 devices.

-

scsi

-

virtio-scsi

-

VirtIO SCSI controller, which can be used to mount block devices, such as disks and CD-ROMs.

-

virtio-serial

-

virtio-serial

-

VirtIO serial port controller, which can be used to mount serial port devices, such as a pty serial port.

-
<a id="table1200165711314"></a>
**Table 2** Attributes of the **address** element in different devices

| Value of **type** | Description | Address |
| ----------------- | ----------- | ------- |
| pci | The address type is PCI address, indicating the mount location of the device on the PCI bus network. | **domain**: domain ID of the PCI device.<br>**bus**: bus number of the PCI device.<br>**slot**: device number of the PCI device.<br>**function**: function number of the PCI device.<br>**multifunction**: \(optional\) specifies whether to enable the multifunction function. |
| usb | The address type is USB address, indicating the location of the device on the USB bus. | **bus**: bus number of the USB device.<br>**port**: port number of the USB device. |
| drive | The address type is storage device address, indicating the owning disk controller and its position on the bus. | **controller**: number of the owning controller.<br>**bus**: channel number of the device output.<br>**target**: target number of the storage device.<br>**unit**: LUN number of the storage device. |
- -#### Configuration Example - -This example shows the topology of a PCIe bus. Three PCIe-Root-Port controllers are mounted to the PCIe root node \(BUS 0\). The multifunction function is enabled for the first PCIe-Root-Port controller \(BUS 1\). A PCIe-to-PCI-bridge controller is mounted to the first PCIe-Root-Port controller to form a PCI bus \(BUS 3\). A virtio-serial device and a USB 2.0 controller are mounted to the PCI bus. A SCSI controller is mounted to the second PCIe-Root-Port controller \(BUS 2\). No device is mounted to the third PCIe-Root-Port controller \(BUS 0\). The configuration details are as follows: - -``` - - ... - - - -
- - -
- - - -
- - -
- - -
- - -
- - -
    ...

```

### Other Common Devices

#### Overview

In addition to storage devices and network devices, some external devices need to be specified in the XML configuration file. This section describes how to configure these elements.

#### Elements

- **serial**: serial port device

    - Attribute **type**: specifies the serial port type. The common attribute values are **pty**, **tcp**, **pipe**, and **file**.

- **video**: media device

    - **type** attribute: media device type. The common attribute value of the AArch64 architecture is **virtio**, and that of the x86\_64 architecture is **vga** or **cirrus**.
    - Subelement **model**: subelement of **video**, which is used to specify the media device type.

        In the subelement **model**, if **type** is set to **vga**, a Video Graphics Array \(VGA\) video card is configured. **vram** indicates the size of the video RAM, in KB by default.

        For example, if a 16 MB VGA video card is configured for an x86\_64 VM, the configuration in the XML file is as follows. In the example, the value of **vram** is the size of the video RAM, in KB by default \(16 MB = 16384 KB\).

        ```
        <video>
            <model type='vga' vram='16384'/>
        </video>
        ```

- **input**: input device

    - **type** attribute: specifies the type of the input device. The common attribute values are **tablet** and **keyboard**, indicating that the input device is a tablet or a keyboard, respectively.
    - **bus**: specifies the bus to which the device is mounted. The common attribute value is **USB**.

- **emulator**: emulator application path
- **graphics**: graphics device

    - **type** attribute: specifies the type of a graphics device. The common attribute value is **vnc**.
    - **listen** attribute: specifies the IP address to be listened to.

#### Configuration Example

For example, in the following example, the VM emulator path, pty serial port, VirtIO media device, USB tablet, USB keyboard, and VNC graphics device are configured.
- ->![](./public_sys-resources/icon-note.gif) **NOTE:** ->When **type** of **graphics** is set to **VNC**, you are advised to set the **passwd** attribute, that is, the password for logging in to the VM using VNC. - -``` - - ... - - /usr/libexec/qemu-kvm - - - - - - ... - - -``` - -## Configurations Related to the System Architecture - -### Overview - -The XML configuration file contain configurations related to the system architecture, which cover the mainboard, CPU, and some features related to the architecture. This section describes meanings of these configurations. - -### Elements - -- **os**: defines VM startup parameters. - - Subelement **type**: specifies the VM type. The attribute **arch** indicates the architecture type, for example, AArch64. The attribute **machine** indicates the type of VM chipset. Supported chipset type can be queried by running the **qemu-kvm -machine ?** command. For example, the AArch64 architecture supports the **virt** type. - - Subelement **loader**: specifies the firmware to be loaded, for example, the UEFI file provided by the EDK. The **readonly** attribute indicates whether the file is read-only. The value can be **yes** or **no**. The **type** attribute indicates the **loader** type. The common values are **rom** and **pflash**. - - Subelement **nvram**: specifies the path of the **nvram** file, which is used to store the UEFI startup configuration. - - -- **features**: Hypervisor controls some VM CPU/machine features, such as the advanced configuration and power interface \(ACPI\) and the GICv3 interrupt controller specified by the ARM processor. - -### Example for AArch64 Architecture - -The VM is of the **aarch64** type and uses **virt** chipset. The VM configuration started using UEFI is as follows: - -``` - - ... - - hvm - /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw - /var/lib/libvirt/qemu/nvram/openEulerVM.fd - - ... - -``` - -Configure ACPI and GIC V3 interrupt controller features for the VM. 
- -``` - - - - -``` - -### Example for x86\_64 Architecture - -The x86\_64 architecture supports both BIOS and UEFI boot modes. If **loader** is not configured, the default BIOS boot mode is used. The following is a configuration example in which the UEFI boot mode and Q35 chipsets are used. - -``` - - ... - - hvm - /usr/share/edk2/ovmf/OVMF.fd - - ... - -``` - -## Other Common Configuration Items - -### Overview - -In addition to system resources and virtual devices, other elements need to be configured in the XML configuration file. This section describes how to configure these elements. - -### Elements - -- **iothreads**: specifies the number of **iothread**, which can be used to accelerate storage device performance. - -- **on\_poweroff**: action taken when a VM is powered off. -- **on\_reboot**: action taken when a VM is rebooted. -- **on\_crash**: action taken when a VM is on crash. -- **clock**: indicates the clock type. - - **offset** attribute: specifies the VM clock synchronization type. The value can be **localtime**, **utc**, **timezone**, or **variable**. - - -### Configuration Example - -Configure two **iothread** for the VM to accelerate storage device performance. - -``` -2 -``` - -Destroy the VM when it is powered off. - -``` -destroy -``` - -Restart the VM. - -``` -restart -``` - -Restart the VM when it is crashed. - -``` -restart -``` - -The clock uses the **utc** synchronization mode. - -``` - -``` - -## XML Configuration File Example - -### Overview - -This section provides XML configuration files of a basic AArch64 VM and a x86\_64 VM as two examples for reference. - -### Example 1 - -An XML configuration file of AArch64 VM, which contains basic elements. 
The following is a configuration example: - -``` - - openEulerVM - 8 - 4 - - hvm - /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw - /var/lib/libvirt/qemu/nvram/openEulerVM.fd - - - - - - - - - 1 - - destroy - restart - restart - - /usr/libexec/qemu-kvm - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -### Example 2 - -An XML configuration file of x86\_64 VM, which contains basic elements and bus elements. The following is a configuration example: - -``` - - openEulerVM - 8388608 - 8388608 - 4 - 1 - - hvm - - - - - - - - - destroy - restart - restart - - /usr/libexec/qemu-kvm - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- - - - - - -