Database and Middleware Deployment
一、DM8 Database
1.1、Create the mount directory
# Create the mount directory
mkdir /data/dm8
# Grant permissions on the mount directory
chmod -R 777 /data/dm8
1.2、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "3.2"
services:
  dm8:
    privileged: true
    hostname: dm8
    image: dm8:dm8_20250506_x86_rh7_64 # Obtain the image from the DM vendor or build a trial image yourself; a non-Docker deployment is also possible
    restart: always
    ports:
      - "5236:5236"
    volumes:
      - /data/dm8:/home/dmdba/data # Mount for the data directory
    environment:
      - TZ=Asia/Shanghai
      - CASE_SENSITIVE=0 # Make the database case-insensitive
      - SYSDBA_PWD=InC3tmU4bijT4vkl # Password for the SYSDBA user
      - SYSAUDITOR_PWD=InC3tmU4bijT4vkl # Password for the SYSAUDITOR user
1.3、Start
docker-compose up -d
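To confirm DM8 came up correctly, a quick check (a sketch; the service name dm8 comes from the compose file above, and the exact ready message may vary between DM8 builds):
# Check container status
docker-compose ps
# Follow the startup log until the database reports it is ready (e.g. "SYSTEM IS READY.")
docker-compose logs -f dm8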
二、PostgreSQL Database
2.1、Create the mount directory
# Create the mount directory
mkdir /data/postgresql
# Grant permissions on the mount directory
chmod -R 777 /data/postgresql
2.2、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "3.8"
services:
  dolphinscheduler-postgresql:
    privileged: true
    restart: always
    image: bitnami/postgresql:15.2.0
    container_name: dolphinscheduler-postgresql
    ports:
      - "5432:5432"
    environment:
      POSTGRESQL_USERNAME: root # Database username
      POSTGRESQL_PASSWORD: Idgf~V4pco&PF # Database password
      POSTGRESQL_DATABASE: dolphinscheduler # Database name
    volumes:
      - /data/postgresql:/bitnami/postgresql # Mount for the data directory
2.3、Start
docker-compose up -d
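To verify the database accepts connections, a sketch using the psql client bundled in the bitnami image (credentials are the ones set above):
# Run a test query as the root user against the dolphinscheduler database
docker exec -it -e PGPASSWORD='Idgf~V4pco&PF' dolphinscheduler-postgresql psql -U root -d dolphinscheduler -c 'SELECT version();'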
三、Redis
3.1、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "3.2"
services:
  redis:
    privileged: true
    restart: always
    image: redis
    container_name: redis
    command: redis-server --requirepass J98%FHF#9h@e88h9fre9 # Redis password
    ports:
      - "6379:6379"
3.2、Start
docker-compose up -d
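To verify the password and connectivity, a quick check with the redis-cli bundled in the image:
# Expect PONG in response
docker exec -it redis redis-cli -a 'J98%FHF#9h@e88h9fre9' ping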
四、RabbitMQ
4.1、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: '3.3'
services:
  rabbitmq:
    restart: always
    privileged: true
    image: rabbitmq:3.12-management
    ports:
      - 5672:5672
      - 15672:15672
    environment:
      - RABBITMQ_DEFAULT_USER=admin
      - RABBITMQ_DEFAULT_PASS=Ej^iUNFLp9MQouc1
4.2、Start
docker-compose up -d
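To verify the broker is up, a sketch using rabbitmqctl inside the container; the management UI should also be reachable on port 15672:
# Print broker status
docker-compose exec rabbitmq rabbitmqctl status
# Management UI: http://<server-ip>:15672 (log in with admin / Ej^iUNFLp9MQouc1)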
五、Nginx
5.1、Create the www directory
sudo mkdir www
5.2、Create nginx.conf
sudo vi nginx.conf
Fill in the content below and save.
user root;
worker_processes 8;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
    worker_connections 3000;
}
http {
    include /etc/nginx/mime.types;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    server_tokens off;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 300;
    keepalive_requests 1000;
    client_header_timeout 60;
    client_body_timeout 60;
    proxy_buffering on;
    proxy_buffer_size 4k;
    proxy_buffers 8 16k;
    proxy_busy_buffers_size 32k;
    proxy_max_temp_file_size 0;
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_comp_level 1;
    gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript application/x-httpd-php image/jpeg image/gif image/png;
    gzip_vary off;
    gzip_disable "MSIE [1-6]\.";
    include http_vhost/*.conf;
}
5.3、Create the http_vhost directory
sudo mkdir http_vhost
5.4、Create qdata.conf
sudo vi qdata.conf
Fill in the content below and save (replace the IP in the content below with the server's internal IP, e.g. 192.168.3.35).
server {
    listen 8082;
    server_name 192.168.3.35;
    client_max_body_size 500M;
    add_header Strict-Transport-Security "max-age=31536000";
    proxy_connect_timeout 600s;
    proxy_send_timeout 600s;
    proxy_read_timeout 600s;
    keepalive_timeout 600s;
    send_timeout 60s;
    location / {
        proxy_read_timeout 600;
        charset utf-8;
        root /usr/share/nginx/qdata;
        try_files $uri $uri/ /index.html;
        index index.html index.htm;
    }
    location /prod-api { # Forwarding for the qData API
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
        proxy_read_timeout 6000;
        rewrite ^/prod-api/(.*)$ /$1 break;
        proxy_set_header X-Real-Ip $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_pass http://192.168.3.35:8081;
    }
    location /jmreport { # Forwarding for reports
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
        proxy_read_timeout 6000;
        proxy_set_header X-Real-Ip $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_pass http://192.168.3.35:8081;
    }
    location /drag { # Forwarding for dashboards
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
        proxy_read_timeout 6000;
        proxy_set_header X-Real-Ip $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_pass http://192.168.3.35:8081;
    }
}
5.5、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "2.0"
services:
  nginx:
    privileged: true
    image: nginx:1.21.5
    restart: always
    ports:
      - "8084:8084" # qportal access port
      - "8082:8082" # qData access port (the qdata.conf vhost above listens on 8082)
    volumes:
      - ./www:/usr/share/nginx # Static file directory
      - ./nginx.conf:/etc/nginx/nginx.conf:ro # Mount for the nginx configuration file
      - ./http_vhost/:/etc/nginx/http_vhost/:ro # Mount for the site configuration files
    logging:
      driver: "json-file"
      options:
        max-size: "2000m"
        max-file: "100"
5.6、Start
docker-compose up -d
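To verify the configuration and the qData vhost, a sketch using the example IP 192.168.3.35 from qdata.conf:
# Validate the mounted configuration
docker-compose exec nginx nginx -t
# The static site should answer on the vhost port
curl -I http://192.168.3.35:8082/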
六、ZooKeeper
6.1、Download the ZooKeeper offline package
https://dlcdn.apache.org/zookeeper/zookeeper-3.8.4/apache-zookeeper-3.8.4-bin.tar.gz
Download apache-zookeeper-3.8.4-bin.tar.gz and upload it to the server.
6.2、Extract and copy the configuration file
sudo tar -zxvf apache-zookeeper-3.8.4-bin.tar.gz
# Rename
sudo mv apache-zookeeper-3.8.4-bin zookeeper
# Copy the configuration file
sudo cp -r zookeeper/conf/zoo_sample.cfg zookeeper/conf/zoo.cfg
6.3、Modify the configuration file
sudo vi zookeeper/conf/zoo.cfg
# Change dataDir=/tmp/zookeeper to dataDir=./tmp/zookeeper
6.4、Start
sudo ./zookeeper/bin/zkServer.sh start
# After a successful start, check the status
sudo ./zookeeper/bin/zkServer.sh status
# Expected output
# ZooKeeper JMX enabled by default
# Using config: /opt/ds2/zookeeper/bin/../conf/zoo.cfg
# Client port found: 2181. Client address: localhost. Client SSL: false.
# Mode: standalone
# If zkServer.sh fails to start with "Error: JAVA_HOME is not set and java could not be found in PATH."
vi ./zookeeper/conf/java.env
# Add the following content
export JAVA_HOME=/opt/java/openjdk
# If zkServer.sh still fails to start, check whether port 8080 is already in use; if it is, add admin.serverPort=8078 to conf/zoo.cfg
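To confirm the server is actually serving requests, a connectivity check with the bundled client (a sketch; assumes the default client port 2181):
# List the root znode; expect [zookeeper] in the output
sudo ./zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /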
七、Hadoop
7.1、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "3.8"
services:
  namenode:
    image: apache/hadoop:3.3.6
    hostname: namenode
    container_name: namenode
    ports:
      - "9870:9870" # NameNode Web UI
      - "8020:8020" # HDFS RPC
      - "9000:9000"
    env_file: ./config
    environment:
      ENSURE_NAMENODE_DIR: "/tmp/hadoop-root/dfs/name"
    command: ["hdfs", "namenode"]
    networks:
      hadoop:
        ipv4_address: 172.32.1.2
  datanode:
    image: apache/hadoop:3.3.6
    hostname: datanode
    container_name: datanode
    ports:
      - "9866:9866"
    env_file: ./config
    command: ["hdfs", "datanode"]
    networks:
      hadoop:
        ipv4_address: 172.32.1.3
    volumes:
      - /data/hadoop:/data/dfs # Mount for the data directory
  resourcemanager:
    image: apache/hadoop:3.3.6
    hostname: resourcemanager
    container_name: resourcemanager
    ports:
      - "8088:8088" # YARN Web UI
    env_file: ./config
    command: ["yarn", "resourcemanager"]
    networks:
      hadoop:
        ipv4_address: 172.32.1.5
  nodemanager:
    image: apache/hadoop:3.3.6
    hostname: nodemanager
    container_name: nodemanager
    env_file: ./config
    command: ["yarn", "nodemanager"]
    networks:
      hadoop:
        ipv4_address: 172.32.1.6
networks:
  hadoop:
    driver: bridge
    ipam:
      config:
        - subnet: 172.32.1.0/24
7.2、Configuration file
sudo vi config
# Add the content below
CORE-SITE.XML_fs.default.name=hdfs://namenode
CORE-SITE.XML_fs.defaultFS=hdfs://namenode
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:8020
HDFS-SITE.XML_dfs.replication=1
HDFS-SITE.XML_dfs.datanode.use.datanode.hostname=true
HDFS-SITE.XML_dfs.disk.balancer.enabled=true
HDFS-SITE.XML_dfs.storage.policy.hot.creation.fallbacks=ARCHIVE
HDFS-SITE.XML_dfs.datanode.data.dir=/data/dfs
MAPRED-SITE.XML_mapreduce.framework.name=yarn
MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
YARN-SITE.XML_yarn.resourcemanager.hostname=resourcemanager
YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false
YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600
YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false
YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=*
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=*
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings=
CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false
7.3、Start
docker-compose up -d
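A quick health check after the containers come up (a sketch; container names match the compose file above):
# All four containers should be running
docker ps --format '{{.Names}}\t{{.Status}}' | grep -E 'namenode|datanode|resourcemanager|nodemanager'
# The DataNode should be registered with the NameNode
docker exec namenode hdfs dfsadmin -report
# Web UIs: NameNode at http://<server-ip>:9870, YARN at http://<server-ip>:8088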
7.4、Create base directories
docker exec -it namenode bash
hdfs dfs -mkdir -p /tmp/etl
hdfs dfs -chmod -R 777 /tmp/etl
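Still inside the namenode container, the directory and its permissions can be confirmed with:
# Expect drwxrwxrwx on /tmp/etl
hdfs dfs -ls /tmp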
7.5、Modify hosts
# On any server that needs to connect to HDFS (for example the server running Spark), modify the hosts file
sudo vi /etc/hosts
# Add the line below; replace 172.32.1.2 with the server's internal IP (if HDFS is accessed over the public network, use the public IP instead)
172.32.1.2 datanode
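A connectivity check from that client machine, sketched with the WebHDFS REST API exposed on the NameNode Web UI port (replace <server-ip> with the actual server IP):
# A JSON FileStatuses response confirms HDFS is reachable
curl "http://<server-ip>:9870/webhdfs/v1/tmp/etl?op=LISTSTATUS"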
八、MongoDB
8.1、Write the docker-compose.yml file
sudo vi docker-compose.yml
Fill in the content below and save.
version: "3.0"
services:
  mongodb:
    container_name: mongodb
    restart: always
    image: mongo:4.4
    ports:
      - 27017:27017
    volumes:
      - /data/mongoDB:/data/db # Mount for the data directory
    environment:
      MONGO_INITDB_ROOT_USERNAME: sjzt # Username
      MONGO_INITDB_ROOT_PASSWORD: Desl9Y4eIQP1BHh7 # Password
8.2、Start
docker-compose up -d
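To verify authentication, a sketch using the legacy mongo shell bundled in the mongo:4.4 image (credentials are the ones set above):
# Expect { "ok" : 1 } in the response
docker exec -it mongodb mongo -u sjzt -p Desl9Y4eIQP1BHh7 --authenticationDatabase admin --eval 'db.runCommand({ ping: 1 })'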