---
# Docker Compose stack: HDFS (NameNode + DataNode), Flink (JobManager +
# TaskManager) and StarRocks, wired together for Paimon-on-HDFS testing.
version: "3.8"

# Shared Flink classpath mounts (host jars -> /opt/flink/lib), reused by both
# the JobManager and the TaskManager via a YAML anchor.
# NOTE(review): mounting the individual hadoop-* 2.7.4 jars AND the
# flink-shaded-hadoop-2 uber jar together risks duplicate-class conflicts on
# the Flink classpath — confirm that both sets are actually required.
x-flink-lib-volumes: &flink-lib-volumes
  - ./hadoop-common-2.7.4.jar:/opt/flink/lib/hadoop-common-2.7.4.jar
  - ./hadoop-client-2.7.4.jar:/opt/flink/lib/hadoop-client-2.7.4.jar
  - ./hadoop-hdfs-2.7.4.jar:/opt/flink/lib/hadoop-hdfs-2.7.4.jar
  - ./commons-logging-1.2.jar:/opt/flink/lib/commons-logging-1.2.jar
  - ./hadoop-auth-2.7.4.jar:/opt/flink/lib/hadoop-auth-2.7.4.jar
  - ./paimon-flink-1.20.jar:/opt/flink/lib/paimon-flink-1.20.jar
  - ./flink-shaded-hadoop-2-uber-2.7.5-9.0.jar:/opt/flink/lib/flink-shaded-hadoop-2-uber-2.7.5-9.0.jar

services:
  # HDFS NameNode
  hdfs-namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop2.7.4-java8
    container_name: hdfs-namenode
    ports:
      - "8020:8020"  # HDFS RPC endpoint (fs.defaultFS)
      # NOTE(review): 9870 is the Hadoop 3.x NameNode web-UI port; Hadoop
      # 2.7.4 serves its UI on 50070 — confirm which port this image exposes.
      - "9870:9870"
    environment:
      - CLUSTER_NAME=test
    volumes:
      - namenode-data:/hadoop/dfs/name

  # HDFS DataNode
  hdfs-datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8
    container_name: hdfs-datanode
    depends_on:
      - hdfs-namenode
    environment:
      - CLUSTER_NAME=test
      # Point the DataNode at the NameNode's RPC endpoint.
      - CORE_CONF_fs_defaultFS=hdfs://hdfs-namenode:8020
    volumes:
      - datanode-data:/hadoop/dfs/data

  # Flink JobManager (depending on the job, the Paimon connector jar may
  # need to be imported).
  jobmanager:
    image: flink:latest
    container_name: flink-jobmanager
    ports:
      - "8081:8081"  # Flink web UI / REST
    depends_on:
      - hdfs-namenode
    command: jobmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
    volumes: *flink-lib-volumes

  # Flink TaskManager
  taskmanager:
    image: flink:latest
    container_name: flink-taskmanager
    depends_on:
      - jobmanager
    command: taskmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
      - TASK_MANAGER_MEMORY=2g
      - TASK_MANAGER_NUMBER_OF_TASK_SLOTS=4
    volumes: *flink-lib-volumes

  # StarRocks (bundles the Paimon module; replaces the earlier standalone
  # `docker run` command).
  starrocks:
    image: starrocks/allin1-ubuntu
    container_name: starrocks
    ports:
      - "9030:9030"  # FE MySQL protocol
      - "8030:8030"  # FE HTTP
      - "8040:8040"  # BE HTTP
    depends_on:
      - hdfs-namenode
    environment:
      # The HDFS NameNode address can be made visible to StarRocks (or its
      # Paimon module) via environment variables or a config file.
      - HDFS_NAMENODE_URL=hdfs://hdfs-namenode:8020
      # Any other required settings can be appended here.
    # If StarRocks' config directory needs to be mounted, add a `volumes:`
    # entry to this service.

volumes:
  namenode-data:
  datanode-data: