Commit 89bb2b7e authored by Combava Orange

[clea-kafka-ssl] docker-compose with optional kong/kafka+ssl/minio services

parent d04211ab
@@ -4,9 +4,11 @@ export LANG=fr_FR.utf8
 export PATH=$PATH:~/.local/bin
 SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 cd $SCRIPTPATH
+CLEA_BATCH_CLUSTER_OUTPUT_PATH=${CLEA_BATCH_CLUSTER_OUTPUT_PATH:-/tmp/v1}
+mkdir -p ${CLEA_BATCH_CLUSTER_OUTPUT_PATH}
 export FLASK_APP=web.py
 export FLASK_ENV=development
clea
@@ -2,18 +2,49 @@
 SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 SCRIPTNAME=`basename $0`
-OPTIONS="-p $SCRIPTNAME -f $SCRIPTPATH/docker-compose.yml"
+OPTIONS="-p $SCRIPTNAME -f $SCRIPTPATH/docker-compose.yml $OVERRIDES"
 # IP of the host (bridge)
 export EXTERNAL_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')
-if [ "$1" == "restart" ] ; then
-    shift
-    docker-compose $OPTIONS stop $*
-    docker-compose $OPTIONS rm -f $*
-    docker-compose $OPTIONS up -d $*
-else
-    docker-compose $OPTIONS $*
-fi
+while [[ $# -gt 0 ]]
+do
+    key="$1"
+    case $key in
+        restart)
+            shift
+            docker-compose $OPTIONS stop $*
+            docker-compose $OPTIONS rm -f $*
+            docker-compose $OPTIONS up -d $*
+            exit
+            ;;
+        -o|--override)
+            shift
+            name=$1
+            shift
+            if [ -f $SCRIPTPATH/docker-compose_override_$name.yml ] ; then
+                OPTIONS="${OPTIONS} -f $SCRIPTPATH/docker-compose_override_$name.yml"
+                # specific to ssl: kafdrop needs a base64 copy of the generated truststore
+                if [ ! -f "$SCRIPTPATH/kafka/certs/truststore.jks" ] ; then
+                    echo "Truststore not generated, use 'kafka/make_certs.sh kafka broker'"
+                    exit 1
+                else
+                    export B64_TRUSTSTORE="$(cat $SCRIPTPATH/kafka/certs/truststore.jks | base64 -w 0)"
+                fi
+            else
+                echo "file docker-compose_override_$name.yml not found"
+                exit 1
+            fi
+            ;;
+        *)
+            break
+            ;;
+    esac
+done
+docker-compose $OPTIONS $*
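For reference, this is how the extended wrapper is meant to be invoked. Override flags must come before the docker-compose arguments, since option parsing stops at the first unrecognized word (a sketch; service names are taken from docker-compose.yml below, and the clea-ssl wrapper that follows hard-codes the ssl case):

```sh
# default stack, plaintext kafka
./clea up -d

# same stack with the kafka SSL override layered on top
# (requires kafka/make_certs.sh to have produced the truststore first)
./clea -o ssl up -d

# stop, remove and recreate a single service
./clea restart kafdrop
```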
#! /bin/bash
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
$SCRIPTPATH/clea -o ssl $@
docker-compose.yml
 version: "3.8"
 services:
-  nginx:
-    image: nginx:1.19
-    volumes:
-      - ./nginx/conf.d:/etc/nginx/conf.d:ro
+  clea-ws-rest:
+    image: clea-ws-rest
+    build: ../clea-ws-rest/
+    environment:
+      SPRING_PROFILES_ACTIVE: dev,docker
+    depends_on:
+      - kafka
     ports:
-      - "80:80"
+      - "8080:8080"
     networks:
-      - public
       - clea-network
     restart: always
-  kong:
-    image: kong:2.3
+  clea-venue-consumer:
+    image: clea-venue-consumer
+    build: ../clea-venue-consumer/
     environment:
-      KONG_DATABASE: "off"
-      KONG_PROXY_ACCESS_LOG: /dev/stdout
-      KONG_ADMIN_ACCESS_LOG: /dev/stdout
-      KONG_PROXY_ERROR_LOG: /dev/stderr
-      KONG_ADMIN_ERROR_LOG: /dev/stderr
-      KONG_ADMIN_LISTEN: "0.0.0.0:8001"
-      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
+      SPRING_PROFILES_ACTIVE: dev,docker
+      SPRING_JPA_HIBERNATE_DDL_AUTO: none
+    depends_on:
+      - postgres
+      - kafka
-    ports:
-      - "8000:8000"
-      - "8001:8001"
-    volumes:
-      - ./kong/kong.yml:/home/kong/kong.yml:ro
     networks:
       - clea-network
-  konga:
-    image: pantsel/konga:next
-    environment:
-      BASE_URL: /konga/
-      TOKEN_SECRET: km1GUr4RkcQD7DewhJPNXrCuZwcKmqjb
-      NODE_ENV: production
-      #ports:
-      # - "1337:1337"
-      - "7070:8080"
-    networks:
-      - clea-network
-    restart: always
-  postgres:
-    image: postgres:9
-    hostname: postgres
-    ports:
-      - "5432:5432"
+  clea-batch:
+    image: clea-batch:latest
+    build: ../clea-batch/
     environment:
-      #POSTGRES_USER: admin # replace default guest/guest
-      POSTGRES_PASSWORD: "admin123!"
-    volumes:
-      - ./postgres/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
+      SPRING_PROFILES_ACTIVE: docker
+      BUCKET: clea-batch
+    ports:
+      - "15000:15000"
+    depends_on:
+      - postgres
     networks:
       - clea-network
+    restart: "no"
-  pgadmin:
-    image: dpage/pgadmin4:5.1
-    environment:
-      PGADMIN_DEFAULT_EMAIL: "admin@pgadmin.local"
-      PGADMIN_DEFAULT_PASSWORD: "admin123!"
-    networks:
-      - clea-network
-  minio:
-    image: minio/minio:edge
-    command: ["server", "/data"]
+  kafka:
+    image: "wurstmeister/kafka:latest"
     environment:
-      MINIO_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE
-      MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-      #ports:
-      # - "9000:9000"
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
-      interval: 30s
-      timeout: 20s
-      retries: 3
+      KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL"
+      KAFKA_LISTENERS: "INTERNAL://:29092,EXTERNAL://:9092"
+      KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:29092,EXTERNAL://localhost:9092"
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT"
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+    depends_on:
+      - zookeeper
+    ports:
+      - "9092:9092"
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
     networks:
       - clea-network
     restart: always
-  # From https://github.com/confluentinc/cp-all-in-one/blob/6.1.1-post/cp-all-in-one/docker-compose.yml
   zookeeper:
-    image: confluentinc/cp-zookeeper:6.1.1
-    hostname: zookeeper
-    container_name: zookeeper
+    image: wurstmeister/zookeeper:latest
     ports:
       - "2181:2181"
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
     networks:
       - clea-network
     restart: always
-  kafka:
-    image: confluentinc/cp-server:6.1.1
-    hostname: broker
-    container_name: broker
-    depends_on:
-      - zookeeper
-    ports:
-      - "9092:9092"
-      - "9101:9101"
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
-      KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker:9092 #,PLAINTEXT_HOST://broker:9092
-      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
-      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
-      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
-      #KAFKA_JMX_PORT: 9101
-      #KAFKA_JMX_HOSTNAME: localhost
-      #KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
-      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:9092
-      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
-      CONFLUENT_METRICS_ENABLE: 'true'
-      #CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
-    networks:
-      - clea-network
   # cmak allows managing kafka but is not so easy and does not allow seeing messages
   # kafka-man:
   #   image: kafkamanager/kafka-manager
   #   command: ["cmak-3.0.0.4/bin/cmak", "-Dhttp.port=8080","-Dplay.http.context=/cmak"]
   #   environment:
   #     ZK_HOSTS: zookeeper
   #     KAFKA_MANAGER_PASSWORD: "admin123!"
   #   #ports:
   #   # - '9001:8080'
   #   networks:
   #     - clea-network
   # kafdrop is a single-page console, already connected to kafka, that allows seeing messages
   kafdrop:
     image: "obsidiandynamics/kafdrop:latest"
     environment:
       JVM_OPTS: "-Xms16M -Xmx48M -Xss180K -XX:-TieredCompilation -XX:+UseStringDeduplication -noverify"
-      KAFKA_BROKERCONNECT: "kafka:9092"
+      KAFKA_BROKERCONNECT: "kafka:29092"
       SERVER_SERVLET_CONTEXTPATH: "/kafdrop"
     depends_on:
       - kafka
-    #ports:
-    # - "9000:9000"
+    ports:
+      - "9000:9000"
     networks:
       - clea-network
-  clea-ws-rest:
-    image: clea-ws-rest:latest
-    container_name: clea-ws-rest
-    build: ../clea-ws-rest/
-    restart: always
-    environment:
-      SPRING_PROFILES_ACTIVE: docker
-      KAFKA_URL: kafka:9092
-    depends_on:
-      - kafka
-    #ports:
-    # - "8080:8080"
-    networks:
-      - clea-network
-  clea-venue-consumer:
-    image: clea-venue-consumer:latest
-    container_name: clea-venue-consumer
-    build: ../clea-venue-consumer/
+  postgres:
+    image: "postgres:latest"
     environment:
-      SPRING_PROFILES_ACTIVE: docker
-      DB_URL: jdbc:postgresql://postgres:5432/clea
-      DB_USER: clea
-      DB_PASSWORD: aelc
-      JPA_DDL_AUTO: none
-      KAFKA_URL: kafka:9092
-      CLEA_CRYPTO_AUTHORITY_SECRET: "c991482ff078a3d83203dfcee763bd33366e0c2c89050e29b05334555964f736"
-    depends_on:
-      - postgres
-      - kafka
+      POSTGRES_DB: cleadb
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: password
+    ports:
+      - "5432:5432"
+    volumes:
+      - "./postgres/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d"
     networks:
       - clea-network
     restart: always
-  clea-batch:
-    image: clea-batch:latest
-    build: ../clea-batch/
-    #entrypoint: ["/bin/tail","-f","/dev/null"]
+  pgadmin:
+    image: "dpage/pgadmin4:latest"
     environment:
-      SPRING_PROFILES_ACTIVE: docker
-      DB_URL: jdbc:postgresql://postgres:5432/clea
-      DB_USER: clea
-      DB_PASSWORD: "aelc"
-      BUCKET: cleacluster-eu-west-3
-      CLEA_BATCH_CLUSTER_OUTPUT_PATH: /tmp/v1
-    volumes:
-      - ./batch/s3cfg:/home/javaapp/.s3cfg
+      PGADMIN_DEFAULT_EMAIL: user@pgadmin.com
+      PGADMIN_DEFAULT_PASSWORD: password
+      PGADMIN_LISTEN_PORT: 80
+    depends_on:
+      - postgres
+    ports:
+      - "8081:80"
     networks:
       - clea-network
-    restart: "no"
+    restart: always
+    logging:
+      driver: none
 networks:
-  public:
-    driver: bridge
-    name: public
   clea-network:
-    #driver: bridge
+    driver: bridge
     name: clea-network
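docker-compose merges every file passed with -f from left to right: later files override scalar values, merge environment maps, and concatenate lists such as ports and volumes; a null value (~) like the one used for the listener variables in the ssl override replaces the value from the base file. The merged result can be inspected without starting anything (a sketch using the file names from this commit):

```sh
# show the effective configuration once the ssl override is applied
docker-compose -f docker-compose.yml \
    -f docker-compose_override_ssl.yml config

# the wrapper builds the same -f list internally
./clea -o ssl config
```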
docker-compose_override_kong.yml
version: "3.8"
services:
  # Add Nginx and Kong
  nginx:
    image: nginx:1.19
    volumes:
      - ./nginx/conf.d:/etc/nginx/conf.d:ro
    ports:
      - "80:80"
    networks:
      - public
      - clea-network
  kong:
    image: kong:2.3
    environment:
      KONG_DATABASE: "off"
      KONG_PROXY_ACCESS_LOG: /dev/stdout
      KONG_ADMIN_ACCESS_LOG: /dev/stdout
      KONG_PROXY_ERROR_LOG: /dev/stderr
      KONG_ADMIN_ERROR_LOG: /dev/stderr
      KONG_ADMIN_LISTEN: "0.0.0.0:8001"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
    ports:
      - "8000:8000"
      - "8001:8001"
    volumes:
      - ./kong/kong.yml:/home/kong/kong.yml:ro
    networks:
      - clea-network
  konga:
    image: pantsel/konga:next
    environment:
      TOKEN_SECRET: km1GUr4RkcQD7DewhJPNXrCuZwcKmqjb
      NODE_ENV: production
      KONGA_LOG_LEVEL: silly
    ports:
      - "1337:1337"
    networks:
      - clea-network
networks:
  public:
    driver: bridge
    name: public
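kong runs DB-less (KONG_DATABASE: "off") and loads its routes from the mounted kong.yml declarative file, which this commit does not include. Once the override is up, the loaded configuration can be checked through the admin port published above (assuming the stack runs on the local host):

```sh
# list the services Kong loaded from /home/kong/kong.yml
curl -s http://localhost:8001/services

# proxy traffic goes through port 8000
curl -i http://localhost:8000/
```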
docker-compose_override_minio.yml
version: "3.8"
services:
  minio:
    image: minio/minio:edge
    environment:
      MINIO_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE
      MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    entrypoint: sh
    command: -c 'mkdir -p /data/clea-batch && /usr/bin/minio server /data'
    ports:
      - "9100:9000"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - clea-network
  clea-batch:
    volumes:
      - ./batch/s3cfg:/home/javaapp/.s3cfg
networks:
  clea-network:
    driver: bridge
    name: clea-network
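clea-batch talks to this MinIO through s3cmd, configured by the mounted ./batch/s3cfg, which is not part of this diff. A minimal configuration consistent with the credentials and bucket created above could look like this (a sketch, not the file shipped in the repo, using the minio service name as S3 endpoint on the clea-network):

```sh
# write a minimal s3cmd configuration pointing at the local MinIO
cat > batch/s3cfg <<'EOF'
[default]
access_key = AKIAIOSFODNN7EXAMPLE
secret_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
host_base = minio:9000
host_bucket = minio:9000
use_https = False
EOF
```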
docker-compose_override_ssl.yml
version: "3.8"
services:
  # kafka:
  #   image: confluentinc/cp-zookeeper:6.1.1
  #   environment:
  #     KAFKA_LISTENERS: SSL://broker:9092
  #     KAFKA_ADVERTISED_LISTENERS: SSL://broker:9092
  #     KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
  #     KAFKA_SSL_KEYSTORE_FILENAME: broker.keystore.jks
  #     KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka-keystore-file.txt'
  #     KAFKA_SSL_KEY_CREDENTIALS: kafka-key-file.txt
  #     KAFKA_SSL_TRUSTSTORE_LOCATION: '/etc/kafka/secrets/truststore.jks'
  #     KAFKA_SSL_TRUSTSTORE_PASSWORD: 'serversecret'
  #   volumes:
  #     - ./kafka/certs:/etc/kafka/secrets/:ro
  #   networks:
  #     - clea-network
  kafka:
    image: "wurstmeister/kafka:latest"
    environment:
      KAFKA_LISTENERS: SSL://kafka:9092
      KAFKA_ADVERTISED_LISTENERS: SSL://kafka:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: ~
      KAFKA_INTER_BROKER_LISTENER_NAME: ~
      KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
      KAFKA_SSL_KEYSTORE_LOCATION: /etc/kafka/secrets/kafka.keystore.jks
      KAFKA_SSL_KEYSTORE_PASSWORD: 'serversecret'
      KAFKA_SSL_KEY_PASSWORD: serversecret
      KAFKA_SSL_TRUSTSTORE_LOCATION: '/etc/kafka/secrets/truststore.jks'
      KAFKA_SSL_TRUSTSTORE_PASSWORD: 'serversecret'
    volumes:
      - ./kafka/certs:/etc/kafka/secrets/:ro
    networks:
      - clea-network
  # cmak allows managing kafka but is not so easy and does not allow seeing messages
  # kafka-man:
  #   image: kafkamanager/kafka-manager
  #   command: ["cmak-3.0.0.4/bin/cmak", "-Dhttp.port=8080","-Dplay.http.context=/cmak"]
  #   environment:
  #     ZK_HOSTS: zookeeper
  #     KAFKA_MANAGER_PASSWORD: "admin123!"
  #   #ports:
  #   # - '9001:8080'
  #   networks:
  #     - clea-network
  # kafdrop is a single-page console, already connected to kafka, that allows seeing messages
  kafdrop:
    image: "obsidiandynamics/kafdrop:latest"
    environment:
      KAFKA_BROKERCONNECT: "kafka:9092"
      KAFKA_TRUSTSTORE: ${B64_TRUSTSTORE}
      KAFKA_PROPERTIES: c2VjdXJpdHkucHJvdG9jb2w9U1NMCnNzbC50cnVzdHN0b3JlLnBhc3N3b3JkPXNlcnZlcnNlY3JldAojc3NsLnRydXN0c3RvcmUudHlwZT1KS1MK
    volumes:
      - ./kafka/certs:/etc/kafka/secrets/:ro
  clea-ws-rest:
    environment:
      SPRING_KAFKA_BOOTSTRAP_SERVERS: kafka:9092
      SPRING_KAFKA_PROPERTIES_SECURITY_PROTOCOL: SSL
      SPRING_KAFKA_SSL_TRUST_STORE_LOCATION: file:///etc/kafka/secrets/truststore.jks
      SPRING_KAFKA_SSL_TRUST_STORE_PASSWORD: serversecret
    volumes:
      - ./kafka/certs:/etc/kafka/secrets/:ro
  clea-venue-consumer:
    environment:
      SPRING_KAFKA_BOOTSTRAP_SERVERS: kafka:9092
      SPRING_KAFKA_PROPERTIES_SECURITY_PROTOCOL: SSL
      SPRING_KAFKA_SSL_TRUST_STORE_LOCATION: file:///etc/kafka/secrets/truststore.jks
      SPRING_KAFKA_SSL_TRUST_STORE_PASSWORD: serversecret
    volumes:
      - ./kafka/certs:/etc/kafka/secrets/:ro
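Since the base compose file still publishes port 9092 on the host, the SSL listener can be probed from outside the containers; the CA produced by make_certs.sh validates the broker certificate (hostname verification aside, see the note after the script). A quick check, run from the directory holding the compose files:

```sh
# probe the TLS handshake on the broker port and verify the chain against our CA
openssl s_client -connect localhost:9092 -CAfile kafka/certs/ca.crt </dev/null
```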
security.protocol=SSL
ssl.truststore.password=serversecret
#ssl.truststore.type=JKS
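The three properties lines above are the plaintext behind the base64 KAFKA_PROPERTIES value given to kafdrop (the file path was lost in this view; kafka/kafka.properties is a plausible location). After editing it, the value can be regenerated the same way the clea script encodes the truststore:

```sh
# re-encode without line wrapping, ready to paste into KAFKA_PROPERTIES
base64 -w 0 kafka/kafka.properties
```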
kafka/make_certs.sh
#! /bin/bash
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
if [ $# -eq 0 ] ; then
echo "Usage: $0 prefixe alt_name"
echo " Generate a certificate sign by an authority (generated first)"
echo ""
echo "Example: $0 kafka broker"
echo ""
exit 0
fi
PREFIXE=$1
PREFIXE=${PREFIXE:-kafka}
#format SUBJECT_ALTERNATIVE_NAME=dns:test.abc.com,ip:1.1.1.1
SUBJECT_ALTERNATIVE_NAME=$2
EXT_SAN="" # optional -ext SAN section
[ -n "$SUBJECT_ALTERNATIVE_NAME" ] && EXT_SAN="-ext SAN=dns:${SUBJECT_ALTERNATIVE_NAME}"
# don't run the dangerous "rm ./*"; remove the certificates before changing folder
rm certs/${PREFIXE}.* 2>/dev/null
mkdir -p $SCRIPTPATH/certs
pushd $SCRIPTPATH/certs
#- 1) create CA once
if [ ! -f "ca.crt" ] ; then
#- Create a Certificate Authority: a public/private key pair and certificate used to sign the other certificates.
openssl req -new -newkey rsa:4096 -days 365 -x509 -subj "/CN=Kafka-Security-CA" -out ca.crt -keyout ca.key -nodes
#- 5) Create a truststore by importing the CA public certificate, so that the kafka broker trusts every certificate issued by our CA:
keytool -keystore truststore.jks -alias CARoot -import -file ca.crt -storepass serversecret -keypass serversecret -noprompt
fi
#- 2) Create a kafka broker certificate:
echo keytool -genkey -keyalg RSA -keystore ${PREFIXE}.keystore.jks -validity 365 -storepass serversecret -keypass serversecret -alias ${PREFIXE} -dname "CN=${PREFIXE}" $EXT_SAN -storetype pkcs12
keytool -genkey -keyalg RSA -keystore ${PREFIXE}.keystore.jks -validity 365 -storepass serversecret -keypass serversecret -alias ${PREFIXE} -dname "CN=${PREFIXE}" $EXT_SAN -storetype pkcs12
#- 3) Get the signed version of the certificate:
keytool -keystore ${PREFIXE}.keystore.jks -certreq -alias ${PREFIXE} -file ${PREFIXE}.csr -storepass serversecret -keypass serversecret
#- 4) Sign the certificate with the CA:
openssl x509 -req -CA ca.crt -CAkey ca.key -in ${PREFIXE}.csr -out ${PREFIXE}.crt -days 365 -CAcreateserial -passin pass:serversecret
# - 6) Import the signed certificate in the keystore:
keytool -keystore ${PREFIXE}.keystore.jks -alias CARoot -import -file ca.crt -storepass serversecret -keypass serversecret -noprompt
keytool -keystore ${PREFIXE}.keystore.jks -alias ${PREFIXE} -import -file ${PREFIXE}.crt -storepass serversecret -keypass serversecret -noprompt
echo "serversecret" > ${PREFIXE}-key-file.txt
echo "serversecret" > ${PREFIXE}-keystore-file.txt
echo "serversecret" > truststore-key-file.txt
popd
#From Kafka 2.0 onwards, hostname verification of servers is enabled by default, and errors are logged when
#the kafka hostname doesn't match the certificate CN. If your hostname and certificate don't match,
#you can disable hostname verification by setting the property ssl.endpoint.identification.algorithm to an empty string:
#ssl.endpoint.identification.algorithm =
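Two quick checks on the generated material, run from the kafka directory, plus the environment-variable form of the property above (SPRING_KAFKA_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM is how Spring Boot's relaxed binding maps it; shown as a sketch, it is not set anywhere in this commit):

```sh
# inspect the keystore and verify the signed certificate against the CA
keytool -list -keystore certs/kafka.keystore.jks -storepass serversecret
openssl verify -CAfile certs/ca.crt certs/kafka.crt

# to disable hostname verification for the Spring services, e.g. in the ssl override:
#   SPRING_KAFKA_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: ""
```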
# GOAL
Generate a "root" certificate then a certificate for Kafka.
# Instructions
## Root CA
Generate a private key, then a certificate
```sh
openssl genrsa -out ca.pem 2048
openssl req -new -x509 -key ca.pem -out ca.crt -days 1095
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [AU]:FR
State or Province Name (full name) [Some-State]:
Locality Name (eg, city) []:
Organization Name (eg, company) [Internet Widgits Pty Ltd]:inria
Organizational Unit Name (eg, section) []:INT
Common Name (e.g. server FQDN or YOUR name) []:root-int
Email Address []:
```
## Kafka Int certificates
Generate a private key, then a certificate request
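The remaining openssl steps, mirroring what make_certs.sh does with keytool, would be along these lines (a sketch; key and CSR names follow the ca.pem/ca.crt convention used above):

```sh
openssl genrsa -out kafka.pem 2048
openssl req -new -key kafka.pem -subj "/CN=kafka" -out kafka.csr
# sign the request with the root CA generated above
openssl x509 -req -CA ca.crt -CAkey ca.pem -in kafka.csr -out kafka.crt -days 365 -CAcreateserial
```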