diff --git a/kafka/3.7.0/22.03-lts-sp3/Dockerfile b/kafka/3.7.0/22.03-lts-sp3/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1112c507b7ee9a091a4e15a5682068e418847262
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/Dockerfile
@@ -0,0 +1,35 @@
+ARG BASE=openeuler/openeuler:22.03-lts-sp3
+FROM ${BASE}
+
+ARG TARGETARCH
+ARG VERSION=3.7.0
+
+ARG SCALA_VERSION=2.13
+ARG KAFKA_URL=https://archive.apache.org/dist/kafka/${VERSION}/kafka_${SCALA_VERSION}-${VERSION}.tgz
+
+RUN yum -y install wget hostname java-1.8.0-openjdk java-1.8.0-openjdk-devel
+RUN mkdir -p /opt/kafka; \
+    wget -O kafka.tgz "${KAFKA_URL}"; \
+    tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
+    mkdir -p /var/lib/kafka/data /etc/kafka/secrets; \
+    mkdir -p /etc/kafka/docker /usr/logs /mnt/shared/config; \
+    useradd -d /home/appuser -m -s /bin/bash appuser; \
+    chown appuser:appuser -R /usr/logs /opt/kafka /mnt/shared/config; \
+    chown appuser:root -R /var/lib/kafka /etc/kafka/secrets /etc/kafka; \
+    chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \
+    cp /opt/kafka/config/log4j.properties /etc/kafka/docker/log4j.properties; \
+    cp /opt/kafka/config/tools-log4j.properties /etc/kafka/docker/tools-log4j.properties; \
+    cp /opt/kafka/config/kraft/server.properties /etc/kafka/docker/server.properties; \
+    rm kafka.tgz; \
+    yum remove -y wget findutils; \
+    yum clean all
+
+COPY --chown=appuser:appuser scripts /etc/kafka/docker
+COPY --chown=appuser:appuser launch /etc/kafka/docker/launch
+RUN chmod 755 /etc/kafka/docker/run
+
+EXPOSE 9092
+USER appuser
+
+VOLUME ["/etc/kafka/secrets", "/var/lib/kafka/data", "/mnt/shared/config"]
+CMD ["/etc/kafka/docker/run"]
\ No newline at end of file
diff --git a/kafka/3.7.0/22.03-lts-sp3/launch b/kafka/3.7.0/22.03-lts-sp3/launch
new file mode 100644
index 0000000000000000000000000000000000000000..900eaab8632aa4fb2b3ff4897d805e9dc148fa1e
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/launch
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property.
+if [ -z "${KAFKA_JMX_OPTS-}" ]; then
+  export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true \
+    -Dcom.sun.management.jmxremote.authenticate=false \
+    -Dcom.sun.management.jmxremote.ssl=false "
+fi
+
+# The JMX client needs to be able to connect to java.rmi.server.hostname.
+# The default for a bridged network is the bridge IP, so you will only be able to connect from another docker container.
+# For a host network, this is the IP that the hostname on the host resolves to.
+
+# If more than one network is configured, hostname -i gives you all the IPs;
+# the default is to pick the first IP (or network).
+export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)}
+
+if [ "${KAFKA_JMX_PORT-}" ]; then
+  # This ensures that the "if" section for JMX_PORT in the kafka launch script does not trigger.
+  export JMX_PORT=$KAFKA_JMX_PORT
+  export KAFKA_JMX_OPTS="${KAFKA_JMX_OPTS-} -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME \
+    -Dcom.sun.management.jmxremote.local.only=false \
+    -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT \
+    -Dcom.sun.management.jmxremote.port=$JMX_PORT"
+fi
+
+# Save any user-provided performance opts in a temporary env variable
+if [ -z "${KAFKA_JVM_PERFORMANCE_OPTS-}" ]; then
+  export TEMP_KAFKA_JVM_PERFORMANCE_OPTS=""
+else
+  export TEMP_KAFKA_JVM_PERFORMANCE_OPTS="$KAFKA_JVM_PERFORMANCE_OPTS"
+fi
+
+# Use the storage CDS archive first, while formatting storage
+export KAFKA_JVM_PERFORMANCE_OPTS="${KAFKA_JVM_PERFORMANCE_OPTS-} -XX:SharedArchiveFile=/opt/kafka/storage.jsa"
+
+echo "===> Using provided cluster id $CLUSTER_ID ..."
+
+# Invoke the docker wrapper to set up property files and format storage
+result=$(/opt/kafka/bin/kafka-run-class.sh kafka.docker.KafkaDockerWrapper setup \
+      --default-configs-dir /etc/kafka/docker \
+      --mounted-configs-dir /mnt/shared/config \
+      --final-configs-dir /opt/kafka/config 2>&1) || \
+      echo $result | grep -i "already formatted" || \
+      { echo $result && (exit 1) }
+
+# Restore the user-provided opts, dropping the storage CDS option
+export KAFKA_JVM_PERFORMANCE_OPTS="$TEMP_KAFKA_JVM_PERFORMANCE_OPTS"
+
+# Now use the kafka CDS archive to start the kafka server
+export KAFKA_JVM_PERFORMANCE_OPTS="$KAFKA_JVM_PERFORMANCE_OPTS -XX:SharedArchiveFile=/opt/kafka/kafka.jsa"
+
+# Start kafka broker
+exec /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties
\ No newline at end of file
diff --git a/kafka/3.7.0/22.03-lts-sp3/scripts/bash-config b/kafka/3.7.0/22.03-lts-sp3/scripts/bash-config
new file mode 100644
index 0000000000000000000000000000000000000000..3f0dc4508f11a92c0deabd1d8f314f9c901d7c72
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/scripts/bash-config
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o nounset \
+    -o errexit
+
+# Trace may expose passwords/credentials by printing them to stdout, so turn on with care.
+if [ "${TRACE:-}" == "true" ]; then
+  set -o verbose \
+      -o xtrace
+fi
\ No newline at end of file
diff --git a/kafka/3.7.0/22.03-lts-sp3/scripts/configure b/kafka/3.7.0/22.03-lts-sp3/scripts/configure
new file mode 100644
index 0000000000000000000000000000000000000000..9d9961d59dd56e192180cd8bd868bdcf6dd489fc
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/scripts/configure
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ensure() {
+  if [[ -z "${!1}" ]]; then
+    echo "$1 environment variable not set"
+    exit 1
+  fi
+}
+
+path() {
+  if [[ $2 == "writable" ]]; then
+    if [[ ! -w "$1" ]]; then
+      echo "$1 file not writable"
+      exit 1
+    fi
+  elif [[ $2 == "existence" ]]; then
+    if [[ ! -e "$1" ]]; then
+      echo "$1 file does not exist"
+      exit 1
+    fi
+  fi
+}
+
+# Unset KAFKA_ADVERTISED_LISTENERS from the environment in KRaft mode when running as a controller only
+if [[ -n "${KAFKA_PROCESS_ROLES-}" ]]
+then
+  echo "Running in KRaft mode..."
+  ensure CLUSTER_ID
+  if [[ $KAFKA_PROCESS_ROLES == "controller" ]]
+  then
+    if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]]
+    then
+      echo "KAFKA_ADVERTISED_LISTENERS is not supported on a KRaft controller."
+      exit 1
+    else
+      # Unset in case the env variable is set with an empty value
+      unset KAFKA_ADVERTISED_LISTENERS
+    fi
+  fi
+fi
+
+# By default, LISTENERS is derived from ADVERTISED_LISTENERS by replacing
+# hosts with 0.0.0.0. This is a good default, as it ensures that the broker
+# process listens on all network interfaces.
+if [[ -z "${KAFKA_LISTENERS-}" ]] && ( [[ -z "${KAFKA_PROCESS_ROLES-}" ]] || [[ $KAFKA_PROCESS_ROLES != "controller" ]] ) && [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]]
+then
+  export KAFKA_LISTENERS
+  KAFKA_LISTENERS=$(echo "$KAFKA_ADVERTISED_LISTENERS" | sed -e 's|://[^:]*:|://0.0.0.0:|g')
+fi
+
+path /opt/kafka/config/ writable
+
+# Set if ADVERTISED_LISTENERS has SSL:// or SASL_SSL:// endpoints.
+if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]] && [[ $KAFKA_ADVERTISED_LISTENERS == *"SSL://"* ]]
+then
+  echo "SSL is enabled."
+
+  ensure KAFKA_SSL_KEYSTORE_FILENAME
+  export KAFKA_SSL_KEYSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_FILENAME"
+  path "$KAFKA_SSL_KEYSTORE_LOCATION" existence
+
+  ensure KAFKA_SSL_KEY_CREDENTIALS
+  KAFKA_SSL_KEY_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEY_CREDENTIALS"
+  path "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION" existence
+  export KAFKA_SSL_KEY_PASSWORD
+  KAFKA_SSL_KEY_PASSWORD=$(cat "$KAFKA_SSL_KEY_CREDENTIALS_LOCATION")
+
+  ensure KAFKA_SSL_KEYSTORE_CREDENTIALS
+  KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_KEYSTORE_CREDENTIALS"
+  path "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION" existence
+  export KAFKA_SSL_KEYSTORE_PASSWORD
+  KAFKA_SSL_KEYSTORE_PASSWORD=$(cat "$KAFKA_SSL_KEYSTORE_CREDENTIALS_LOCATION")
+
+  if [[ -n "${KAFKA_SSL_CLIENT_AUTH-}" ]] && ( [[ $KAFKA_SSL_CLIENT_AUTH == *"required"* ]] || [[ $KAFKA_SSL_CLIENT_AUTH == *"requested"* ]] )
+  then
+    ensure KAFKA_SSL_TRUSTSTORE_FILENAME
+    export KAFKA_SSL_TRUSTSTORE_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_FILENAME"
+    path "$KAFKA_SSL_TRUSTSTORE_LOCATION" existence
+
+    ensure KAFKA_SSL_TRUSTSTORE_CREDENTIALS
+    KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION="/etc/kafka/secrets/$KAFKA_SSL_TRUSTSTORE_CREDENTIALS"
+    path "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION" existence
+    export KAFKA_SSL_TRUSTSTORE_PASSWORD
+    KAFKA_SSL_TRUSTSTORE_PASSWORD=$(cat "$KAFKA_SSL_TRUSTSTORE_CREDENTIALS_LOCATION")
+  fi
+fi
+
+# Set if KAFKA_ADVERTISED_LISTENERS has SASL_PLAINTEXT:// or SASL_SSL:// endpoints.
+if [[ -n "${KAFKA_ADVERTISED_LISTENERS-}" ]] && [[ $KAFKA_ADVERTISED_LISTENERS =~ .*SASL_.*://.* ]]
+then
+  echo "SASL is enabled."
+
+  ensure KAFKA_OPTS
+
+  if [[ ! $KAFKA_OPTS == *"java.security.auth.login.config"* ]]
+  then
+    echo "KAFKA_OPTS should contain the 'java.security.auth.login.config' property."
+  fi
+fi
+
+if [[ -n "${KAFKA_JMX_OPTS-}" ]]
+then
+  if [[ ! $KAFKA_JMX_OPTS == *"com.sun.management.jmxremote.rmi.port"* ]]
+  then
+    echo "KAFKA_JMX_OPTS should contain the 'com.sun.management.jmxremote.rmi.port' property. It is required for accessing the JMX metrics externally."
+  fi
+fi
\ No newline at end of file
diff --git a/kafka/3.7.0/22.03-lts-sp3/scripts/configureDefaults b/kafka/3.7.0/22.03-lts-sp3/scripts/configureDefaults
new file mode 100644
index 0000000000000000000000000000000000000000..c3c68ec8d311cf1c176b3a4907362bffdc85f8f0
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/scripts/configureDefaults
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+declare -A env_defaults
+env_defaults=(
+# Replace CLUSTER_ID with a unique base64 UUID generated with "bin/kafka-storage.sh random-uuid"
+  ["CLUSTER_ID"]="5L6g3nShT-eMCtK--X86sw"
+)
+
+for key in "${!env_defaults[@]}"; do
+  if [[ -z "${!key:-}" ]]; then
+    echo "${key} not set. Setting it to default value: \"${env_defaults[$key]}\""
+    export "$key"="${env_defaults[$key]}"
+  fi
+done
\ No newline at end of file
diff --git a/kafka/3.7.0/22.03-lts-sp3/scripts/run b/kafka/3.7.0/22.03-lts-sp3/scripts/run
new file mode 100644
index 0000000000000000000000000000000000000000..9b4d43d6e771b9a3bf27fa3c08ee73f86b6e9c7b
--- /dev/null
+++ b/kafka/3.7.0/22.03-lts-sp3/scripts/run
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+. /etc/kafka/docker/bash-config
+
+# Set environment values if they are passed as arguments
+if [ $# -ne 0 ]; then
+  echo "===> Overriding env params with args ..."
+  for var in "$@"
+  do
+    export "$var"
+  done
+fi
+
+echo "===> User"
+id
+
+echo "===> Setting default values of environment variables if not already set."
+. /etc/kafka/docker/configureDefaults
+
+echo "===> Configuring ..."
+. /etc/kafka/docker/configure
+
+echo "===> Launching ... "
+. /etc/kafka/docker/launch
\ No newline at end of file
diff --git a/kafka/README.md b/kafka/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb8d0b62649d71bfd8a4a452855c913b1b2d869a
--- /dev/null
+++ b/kafka/README.md
@@ -0,0 +1,39 @@
+# Kafka
+
+# Quick reference
+
+- Apache Kafka docker image built on openEuler.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build images and push:
+```shell
+docker buildx build -t "openeuler/kafka:$TAG" --platform linux/amd64,linux/arm64 . --push
+```
+
+We use `buildx` here to build multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+2. Run:
+```shell
+docker run -d --name kafka -p 9092:9092 openeuler/kafka:{TAG}
+```
+
+# Supported tags and respective Dockerfile links
+
+- [`3.7.0-oe2203sp3`](3.7.0/22.03-lts-sp3/Dockerfile): Kafka v3.7.0, openEuler 22.03 LTS SP3
+
+## Operating System
+Linux/Unix, ARM64 or x86-64 architecture.
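+
+# Configuration reference
+
+The startup scripts in this image invoke Kafka's `kafka.docker.KafkaDockerWrapper`, so broker settings can be supplied as `KAFKA_*` environment variables and the KRaft storage id as `CLUSTER_ID`. A minimal sketch, assuming the default single-node KRaft configuration shipped in the image; the cluster id shown is the built-in default from `configureDefaults` and should be replaced with one generated by `kafka-storage.sh random-uuid`:
+```shell
+docker run -d --name kafka -p 9092:9092 \
+  -e CLUSTER_ID="5L6g3nShT-eMCtK--X86sw" \
+  openeuler/kafka:{TAG}
+```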
diff --git a/kafka/doc/image-info.yml b/kafka/doc/image-info.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7e5fd2dc4e49c5afd47e1c081bc906a793a17122
--- /dev/null
+++ b/kafka/doc/image-info.yml
@@ -0,0 +1,90 @@
+name: Kafka
+category: cloud
+description: Kafka is a distributed system made up of servers and clients that communicate over a high-performance TCP network protocol. It can be deployed on bare-metal hardware, virtual machines, and containers, both on premises and in the cloud.
+environment: |
+  This application is provided as a container image and runs in a Linux environment with Docker preinstalled. Recommended ways to install Docker are listed below.
+  ## Install with apt
+  ### 1. Update system package dependencies
+  ```
+  sudo apt update
+  sudo apt install ca-certificates curl gnupg lsb-release
+  ```
+  ### 2. Add Docker's official GPG key
+  ```
+  sudo mkdir -p /etc/apt/keyrings
+  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+  sudo chmod a+r /etc/apt/keyrings/docker.gpg
+  ```
+  ### 3. Add Docker's official APT repository
+  ```
+  echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+  ```
+  ### 4. Update the package index again
+  ```
+  sudo apt update
+  ```
+  ### 5. Install Docker
+  ```
+  sudo apt install docker-ce docker-ce-cli containerd.io
+  ```
+
+  ## Install with yum
+  ### 1. Install yum-utils
+  ```
+  sudo yum install -y yum-utils
+  ```
+  ### 2. Add the Docker repository
+  ```
+  sudo yum-config-manager \
+    --add-repo \
+    https://download.docker.com/linux/centos/docker-ce.repo
+  ```
+  ### 3. Install Docker
+  ```
+  sudo yum install -y docker-ce docker-ce-cli containerd.io
+  ```
+
+  Note that on openEuler, Docker can be installed with a single command:
+  ```
+  yum install -y docker
+  ```
+
+  ## Install with the convenience script
+  ### 1. Download the installation script
+  ```
+  curl -fsSL https://get.docker.com -o get-docker.sh
+  ```
+  ### 2. Run the script as root
+  ```
+  sudo sh get-docker.sh
+  ```
+  To install a specific Docker version with this method, edit the source of `get-docker.sh` accordingly.
+
+  ## Verify the installation
+  ```
+  sudo docker run hello-world
+  ```
+
+download: |
+  Pull the container image
+  ```
+  docker pull openeuler/kafka:{TAG}
+  ```
+
+install: |
+  Start the container
+  ```
+  docker run -d --name kafka -p 9092:9092 openeuler/kafka:{TAG}
+  ```
+  Startup options can be customized as needed.
+
+license: Apache-2.0 license
+similar_packages:
+  - Apache Pulsar: A high-performance distributed messaging and stream-processing platform that provides durable message storage, multi-tenancy, scalability, and reliability.
+  - RabbitMQ: An open-source message broker and queueing system that supports multiple messaging protocols, including AMQP, MQTT, and STOMP. RabbitMQ provides reliable message delivery and high scalability.
+  - Apache ActiveMQ: A Java-based message queue and message bus that supports multiple messaging protocols, including AMQP, STOMP, and OpenWire. It provides durable message storage and high availability.
+  - Apache RocketMQ: A distributed messaging and stream-processing platform with high throughput and low latency. It supports ordered and reliable message delivery.
+  - AWS Kinesis: A stream-processing service in Amazon Web Services for collecting, processing, and analyzing real-time data streams. It provides scalable message delivery and data processing.
+  - Apache Samza: A distributed stream-processing framework for processing and analyzing data streams in real time. It integrates tightly with Apache Kafka and can consume data directly from Kafka.
+dependency:
+  - openjdk
diff --git a/kafka/doc/picture/logo.png b/kafka/doc/picture/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd45f20811dd3910f34486d6f40f9eeecf9757ec
Binary files /dev/null and b/kafka/doc/picture/logo.png differ
diff --git a/kafka/meta.yml b/kafka/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4292c36e5ad3822f8c3efeb082766c4bf197ce6c
--- /dev/null
+++ b/kafka/meta.yml
@@ -0,0 +1,2 @@
+3.7.0-oe2203sp3:
+  - kafka/3.7.0/22.03-lts-sp3/Dockerfile
\ No newline at end of file