头图

image.png

服务器 2核4G

filebeat->kafka->logstash->elasticsearch->kibana

Elasticsearch

Node-1

# Prerequisites: base tools, kernel parameters, open-file / process limits
yum install -y wget sudo vim
cat >> /usr/lib/sysctl.d/00-system.conf <<EOF
net.ipv4.ip_forward=1
EOF
# Append (>>) instead of overwrite (>): "cat >" would wipe all existing
# settings in /etc/sysctl.conf, not just add vm.max_map_count
cat >> /etc/sysctl.conf <<EOF
vm.max_map_count=262144
EOF
# Same here: append so existing limits.conf entries are preserved
cat >> /etc/security/limits.conf <<EOF
* hard nofile 65536
* soft nofile 65536
* hard nproc 65536
* soft nproc 65536
EOF
systemctl restart network;sudo sysctl -p;
# Show the resulting soft/hard open-file limits for verification
ulimit -S -n;ulimit -H -n
# 安装docker
sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh;
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh;
# 创建目录上传证书
mkdir -p /usr/local/es/certs;
# 上传证书到/usr/local/es/certs
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/elastic-certificates.p12
#链接:https://pan.baidu.com/s/1a0P9nnrtgjPsPjYA_UI3uw?pwd=clsg 提取码:clsg
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/http.p12
# 链接:https://pan.baidu.com/s/1ApvTjBVvj1GV1Vr85nMgZw?pwd=fhs0 提取码:fhs0
mkdir -p {/usr/local/es/config,/usr/local/es/logs,/usr/local/es/data,/usr/local/es/plugins}
# es用户
useradd es;echo "123" | passwd --stdin es;
# 授权开端口
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;firewall-cmd --list-all;
# 写入配置文件
cat > /usr/local/es/config/elasticsearch.yml <<  "EOF"
cluster.name: es-cluster
node.name: node-1
network.host: 0.0.0.0
network.publish_host: 10.0.0.21
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
xpack.security.enabled: true
xpack.security.authc.api_key.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.http.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.http.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.http.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
EOF
chown es:es /usr/local/es/ -R
# 创建容器
docker pull elasticsearch:7.17.0
docker run --name es \
-d --network=bridge \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /usr/local/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /usr/local/es/data/:/usr/share/elasticsearch/data/ \
--volume /usr/local/es/logs/:/usr/share/elasticsearch/logs/ \
--volume /usr/local/es/certs/:/usr/share/elasticsearch/config/certs/ \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码elastic/elastic

Node-2

# Prerequisites: base tools, kernel parameters, open-file / process limits
yum install -y wget sudo vim
cat >> /usr/lib/sysctl.d/00-system.conf <<EOF
net.ipv4.ip_forward=1
EOF
# Append (>>) instead of overwrite (>): "cat >" would wipe all existing
# settings in /etc/sysctl.conf, not just add vm.max_map_count
cat >> /etc/sysctl.conf <<EOF
vm.max_map_count=262144
EOF
# Same here: append so existing limits.conf entries are preserved
cat >> /etc/security/limits.conf <<EOF
* hard nofile 65536
* soft nofile 65536
* hard nproc 65536
* soft nproc 65536
EOF
systemctl restart network;sudo sysctl -p;
# Show the resulting soft/hard open-file limits for verification
ulimit -S -n;ulimit -H -n
# 安装docker
sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh;
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh;
# 创建目录上传证书
mkdir -p /usr/local/es/certs;
# 上传证书到/usr/local/es/certs
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/elastic-certificates.p12
#链接:https://pan.baidu.com/s/1a0P9nnrtgjPsPjYA_UI3uw?pwd=clsg 提取码:clsg
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/http.p12
# 链接:https://pan.baidu.com/s/1ApvTjBVvj1GV1Vr85nMgZw?pwd=fhs0 提取码:fhs0
mkdir -p {/usr/local/es/config,/usr/local/es/logs,/usr/local/es/data,/usr/local/es/plugins}
# es用户
useradd es;echo "123" | passwd --stdin es;
# 授权开端口
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;firewall-cmd --list-all;
## 写入配置文件
cat > /usr/local/es/config/elasticsearch.yml <<  "EOF"
cluster.name: es-cluster
node.name: node-2
network.host: 0.0.0.0
network.publish_host: 10.0.0.22
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
xpack.security.enabled: true
xpack.security.authc.api_key.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.http.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.http.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.http.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
EOF
chown es:es /usr/local/es/ -R
# 创建容器
docker pull elasticsearch:7.17.0
docker run --name es \
-d --network=bridge \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /usr/local/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /usr/local/es/data/:/usr/share/elasticsearch/data/ \
--volume /usr/local/es/logs/:/usr/share/elasticsearch/logs/ \
--volume /usr/local/es/certs/:/usr/share/elasticsearch/config/certs/ \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码elastic/elastic

Node-3

# Prerequisites: base tools, kernel parameters, open-file / process limits
yum install -y wget sudo vim
cat >> /usr/lib/sysctl.d/00-system.conf <<EOF
net.ipv4.ip_forward=1
EOF
# Append (>>) instead of overwrite (>): "cat >" would wipe all existing
# settings in /etc/sysctl.conf, not just add vm.max_map_count
cat >> /etc/sysctl.conf <<EOF
vm.max_map_count=262144
EOF
# Same here: append so existing limits.conf entries are preserved
cat >> /etc/security/limits.conf <<EOF
* hard nofile 65536
* soft nofile 65536
* hard nproc 65536
* soft nproc 65536
EOF
systemctl restart network;sudo sysctl -p;
# Show the resulting soft/hard open-file limits for verification
ulimit -S -n;ulimit -H -n
# 安装docker
sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh;
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh;
# 创建目录上传证书
mkdir -p /usr/local/es/certs;
# 上传证书到/usr/local/es/certs
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/elastic-certificates.p12
#链接:https://pan.baidu.com/s/1a0P9nnrtgjPsPjYA_UI3uw?pwd=clsg 提取码:clsg
wget -P /usr/local/es/certs/ https://qn.kentcloud.top/shell/elkstack/http.p12
# 链接:https://pan.baidu.com/s/1ApvTjBVvj1GV1Vr85nMgZw?pwd=fhs0 提取码:fhs0
mkdir -p {/usr/local/es/config,/usr/local/es/logs,/usr/local/es/data,/usr/local/es/plugins}
# es用户
useradd es;echo "123" | passwd --stdin es;
# 授权开端口
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;firewall-cmd --list-all;
## 写入配置文件
cat > /usr/local/es/config/elasticsearch.yml <<  "EOF"
cluster.name: es-cluster
node.name: node-3
network.host: 0.0.0.0
network.publish_host: 10.0.0.23
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
xpack.security.enabled: true
xpack.security.authc.api_key.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.http.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.http.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.http.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
EOF
chown es:es /usr/local/ -R
# 创建容器
docker pull elasticsearch:7.17.0
docker run --name es \
-d --network=bridge \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /usr/local/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /usr/local/es/data/:/usr/share/elasticsearch/data/ \
--volume /usr/local/es/logs/:/usr/share/elasticsearch/logs/ \
--volume /usr/local/es/certs/:/usr/share/elasticsearch/config/certs/ \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码elastic/elastic

Kibana

Master

#安装Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cat >> /etc/docker/daemon.json << "EOF"
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF
#安装docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
#重启docker
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
reboot;
#开启防火墙
firewall-cmd --zone=public --add-port=5601/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#下载镜像
docker pull kibana:7.17.0
docker run --name kibana -d kibana:7.17.0
docker stop kibana
#创建映射目录
docker cp kibana:/usr/share/kibana /usr/local/
docker commit kibana kibana2
docker rm kibana
#写入配置文件
cat > /usr/local/kibana/config/kibana.yml << EOF
# 配置服务器的名称
server.name: kibana
# Kibana 默认监听端口5601,如果需要改变就修改这个配置
server.port: 5601
# 设置中文
i18n.locale: "zh-CN"
# Kibana 部署服务器IP,如果是单网卡配置0.0.0.0即可,如果是多网卡需要配置IP
server.host: "0.0.0.0"
# 创建一个kibana索引
kibana.index: ".kibana"
# 超时
server.shutdownTimeout: "5s"

# Kibana-Https
#server.ssl.enabled: true
# 访问地址
server.publicBaseUrl: "http://10.0.0.11:5601"
# p12路径
#server.ssl.keystore.path: certs/kibana-server.p12
#server.ssl.certificate: certs/kibana-server.crt
#server.ssl.key: certs/kibana-server.key

# ES-Https 证书路径
#elasticsearch.ssl.verificationMode: certificate
#elasticsearch.ssl.certificateAuthorities: certs/elasticsearch-ca.pem
# 配置ES的集群节点地址
elasticsearch.hosts: ["http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200"]
# es的账号密码
elasticsearch.username: "elastic"
elasticsearch.password: "elastic"
monitoring.ui.container.elasticsearch.enabled: true
EOF
#证书
#创建目录上传证书
mkdir -p /usr/local/kibana/certs/
#上传证书到/usr/local/kibana/certs
wget -P /usr/local/kibana/certs/ https://qn.kentcloud.top/shell/elkstack/elastic-stack-ca.p12
#链接:https://pan.baidu.com/s/1OCL-6ZqpSg1xM9eR9SOEuQ 提取码:wrov 
# Create the Kibana container from the committed image, mounting the prepared
# config/certs directory. (The original script ran "docker restart kibana"
# BEFORE this container existed — the old one was removed above — so the
# restart is moved after creation.)
docker run -it -d --name=kibana \
--network=bridge \
--restart=always \
--publish 5601:5601 \
--privileged=true \
-v /usr/local/kibana:/usr/share/kibana \
kibana2
# Restart so the container picks up the kibana.yml and certs written above
docker restart kibana

Logstash

Master

#安装Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
cat > daemon.json <<EOF
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF

#安装docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

#重启docker
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

#开启防火墙
firewall-cmd --zone=public --add-port=5044/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;

#统一目录
if [ ! -d "/data/software" ]; then
    mkdir -p /data/software/
fi
if [ ! -d "/data/modules/mysql/" ]; then
    mkdir -p /data/modules/
fi

mkdir -p /data/modules/logstash/config/
cat > /data/modules/logstash/config/logstash.yml << EOF
http.host: "0.0.0.0"
path.config: /usr/share/logstash/config/conf.d/*.conf
path.logs: /usr/share/logstash/logs
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: elastic
xpack.monitoring.elasticsearch.hosts: [ "http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200" ]
EOF
mkdir -p /data/modules/logstash/config/conf.d
cat > /data/modules/logstash/config/conf.d/logstash-test.conf << EOF
input {
  beats {
    port => 5044
  }
  file {
    #Nginx日志目录
    path => "/usr/local/nginx/logs/access.log"
    start_position => "beginning"
  }
}
filter {
  if [path] =~ "access" {
    mutate { replace => { "type" => "nginx_access" } }
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
  }
  date {
    #时间戳
    match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}
output {
  elasticsearch {
    #接受主机
    hosts => ["10.0.0.21:9200","10.0.0.22:9200","10.0.0.23:9200"]
    index => "kafka_test"
    user => "elastic"
    password => "elastic"
  }
  stdout { codec => rubydebug }
}

EOF

#创建容器网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
elk-net

#启动容器
docker pull elastic/logstash:7.17.0
docker run -dit --name=logstash \
-d --network=elk-net \
--ip=10.10.10.12 \
--publish 5044:5044 \
--restart=always --privileged=true \
-e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
-v /data/modules/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml \
-v /data/modules/logstash/config/conf.d:/usr/share/logstash/config/conf.d \
elastic/logstash:7.17.0

Filebeat

client

#安装Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
cat > daemon.json <<EOF
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF

#安装docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#开启端口
firewall-cmd --zone=public --add-port=2181/tcp --permanent;
firewall-cmd --zone=public --add-port=5601/tcp --permanent;
firewall-cmd --zone=public --add-port=5044/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#写入配置文件
mkdir -p /usr/local/filebeat/
mkdir -p /usr/local/filebeat/logs
mkdir -p /usr/local/filebeat/modules.d
cd /usr/local/filebeat/
cat > filebeat.yml << "EOF"
filebeat.inputs:
- type: filestream
  enabled: false
  paths:
    - /var/log/*.log
    #- /usr/share/filebeat/logs/*.log
    #- /usr/local/nginx/logs/*.log
    #- c:\programdata\elasticsearch\logs\*

#====================Nginx====================#
- type: filestream
  enabled: true
  paths:
    - /usr/share/filebeat/logs/nginx/*.log

#====================Mysql====================#
- type: log
  enabled: true
  paths:
    - /usr/share/filebeat/logs/mysql/*.log

# ============================== Filebeat modules ==============================

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

# =================================== Kibana ===================================
setup.kibana:
  host: "10.0.0.11:5601"

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  hosts: ["10.0.0.21:9200","10.0.0.22:9200","10.0.0.23:9200"]

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"
  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  username: "elastic"
  password: "elastic"

# ------------------------------ Logstash Output -------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

#filebeat.modules:
#  - module: nginx
#  - module: mysql
#  - module: system

#output.kafka:
#    version: "2.8.1"
#    enabled: true
#    hosts: 10.0.0.21:9092
#    topic: test  # topic name
#    username: xxxxxx   # kafka username
#    password: xxxxxx   # kafka password
#    required_acks: 1
#    compression: gzip
#    max_message_bytes: 1000000
#    codec.format:
#       string: '%{[message]}'

EOF
#创建容器网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
elk-net

#下载镜像
docker pull elastic/filebeat:7.17.0

#日志修改777,日志采集不用每次需重新加载
chmod 777 /usr/local/filebeat/logs
#文件赋权
chmod 755 /usr/local/filebeat/filebeat.yml

docker stop filebeat && docker rm filebeat

方法一(硬链接采集)

#创建容器 (容器外映射容器内日志目录,方便采集)
docker run --name filebeat -d \
-d --network=elk-net \
--ip=10.10.10.66 \
--publish 9200:9200 \
--publish 9300:9300 \
--publish 5601:5601 \
--publish 5044:5044 \
--restart=always \
--privileged=true \
-v /usr/local/filebeat/logs:/usr/share/filebeat/logs \
-v /usr/local/filebeat/modules.d:/usr/share/filebeat/modules.d \
-v /usr/local/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
elastic/filebeat:7.17.0

#重启容器
docker restart filebeat
docker logs filebeat

# 须知:注意采集日志logs目录的权限777
#硬连接指向采集目录
ln /usr/local/nginx/logs/error.log /usr/local/filebeat/logs/nginx/error.log
ln /usr/local/nginx/logs/access.log /usr/local/filebeat/logs/nginx/access.log
ln /data/modules/mysql/mysql.err /usr/local/filebeat/logs/mysql/mysql.err 

Nginx-Json

vim /usr/local/nginx/conf/nginx.conf
#在server添加
access_log  logs/access.log json;
#在http块添加
log_format json '{ "@timestamp": "$time_iso8601", '
'"remote_addr": "$remote_addr", '
'"referer": "$http_referer", '
'"request": "$request", '
'"status": $status, '
'"bytes":$body_bytes_sent, '
'"agent": "$http_user_agent", '
'"x_forwarded": "$http_x_forwarded_for", '
'"upstr_addr": "$upstream_addr",'
'"upstr_host": "$upstream_http_host",'
'"ups_resp_time": "$upstream_response_time" }';
#验证格式重启nginx
nginx -t
systemctl restart nginx

image.png

方法二(直采:应用需安装到容器挂载目录)

#容器挂载目录:/usr/local/
#创建容器 (容器外映射容器内日志目录,方便采集)
docker run --name filebeat -d \
-d --network=elk-net \
--ip=10.10.10.66 \
--publish 9200:9200 \
--publish 9300:9300 \
--publish 5601:5601 \
--publish 5044:5044 \
--restart=always \
--privileged=true \
-v /usr/local/:/usr/share/filebeat/logs \
-v /usr/local/filebeat/modules.d:/usr/share/filebeat/modules.d \
-v /usr/local/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
elastic/filebeat:7.17.0

#重启容器
docker restart filebeat
docker logs filebeat

Kafka+Zookeeper

Node-1

# 安装docker
yum install -y wget sudo && sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh
# Zookeeper
mkdir -p {/usr/local/zookeeper/data/,/usr/local/zookeeper/logs/,/usr/local/zookeeper/conf/}
#(myid)需要与server.(id)相同
echo "1" > /usr/local/zookeeper/data/myid
#写入配置文件
cat > /usr/local/zookeeper/conf/zoo.cfg << EOF
#集群节点间心跳检查间隔,单位是毫秒,后续所有和时间相关的配置都是该值的倍数,进行整数倍的配置,如4等于8000
tickTime=2000
#集群其他节点与Master通信完成的初始通信时间限制,这里代表10*2000
initLimit=10
#若Master节点在超过syncLimit*tickTime的时间还未收到响应,认为该节点宕机
syncLimit=5
#数据存放目录
dataDir=/data
#ZK日志文件存放路径
dataLogDir=/logs
#ZK服务端口
clientPort=2181
#单个客户端最大连接数限制,0代表不限制
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为0代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,
#A表示第几号服务器;
#B是IP;
#C是该服务器与leader通信端口;
#D是leader挂掉后重新选举所用通信端口;两个端口号可以随意
server.1=0.0.0.0:2888:3888
server.2=10.0.0.22:2888:3888
server.3=10.0.0.23:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创建容器
docker run -d \
-e TZ="Asia/Shanghai" \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=bridge \
--name zookeeper \
--privileged=true \
--restart always \
-v /usr/local/zookeeper/data:/data \
-v /usr/local/zookeeper/logs:/logs \
-v /usr/local/zookeeper/data/myid:/data/myid \
-v /usr/local/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0

#安装Kafka
docker pull wurstmeister/kafka
#创建日志目录
mkdir -p /data/modules/kafka/logs
mkdir -p /data/modules/kafka/config
cat > /data/modules/kafka/config/server.properties << "EOF"
############################# Server Basics #############################
broker.id=-1
############################# Socket Server Settings #############################
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
############################# Log Basics #############################
log.dirs=/kafka/kafka-logs-1
num.partitions=1
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings  #############################
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Retention Policy #############################
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
zookeeper.connect=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
group.initial.rebalance.delay.ms=0
port=9092
advertised.host.name=10.0.0.21
advertised.port=9092
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.0.0.21:9092
EOF
#创建容器
docker run -d --name kafka \
--publish 9092:9092 \
--network=bridge \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.21 \
--env KAFKA_ADVERTISED_PORT=9092  \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1  \
wurstmeister/kafka

# Kafka测试
#进入容器
docker exec -it -u root kafka /bin/bash
cd /opt/kafka/
##新建topic
./bin/kafka-topics.sh --create --zookeeper 10.0.0.21:2181 --replication-factor 1 --partitions 1 --topic test
##查看topic
./bin/kafka-topics.sh --list --zookeeper 10.0.0.21:2181
##生产消息
./bin/kafka-console-producer.sh --broker-list 10.0.0.21:9092 --topic test
##消费topic test1的消息
./bin/kafka-console-consumer.sh --bootstrap-server 10.0.0.21:9092 --topic test --from-beginning
##删除topic test1
./bin/kafka-topics.sh --delete --zookeeper 10.0.0.21:2181  --topic test

Node-2

# 安装docker
yum install -y wget sudo && sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh
# Zookeeper
mkdir -p {/usr/local/zookeeper/data/,/usr/local/zookeeper/logs/,/usr/local/zookeeper/conf/}
# (myid) must equal this node's server.(id) entry in zoo.cfg — node-2 => 2
# (fixed "ehco" typo, which made this command fail and leave myid unwritten)
echo "2" > /usr/local/zookeeper/data/myid
#写入配置文件
cat > /usr/local/zookeeper/conf/zoo.cfg << EOF
#集群节点间心跳检查间隔,单位是毫秒,后续所有和时间相关的配置都是该值的倍数,进行整数倍的配置,如4等于8000
tickTime=2000
#集群其他节点与Master通信完成的初始通信时间限制,这里代表10*2000
initLimit=10
#若Master节点在超过syncLimit*tickTime的时间还未收到响应,认为该节点宕机
syncLimit=5
#数据存放目录
dataDir=/data
#ZK日志文件存放路径
dataLogDir=/logs
#ZK服务端口
clientPort=2181
#单个客户端最大连接数限制,0代表不限制
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为0代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,
#A表示第几号服务器;
#B是IP;
#C是该服务器与leader通信端口;
#D是leader挂掉后重新选举所用通信端口;两个端口号可以随意
server.1=10.0.0.21:2888:3888
server.2=0.0.0.0:2888:3888
server.3=10.0.0.23:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创建容器
docker run -d \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=bridge \
--name zookeeper \
--privileged=true \
--restart always \
-v /usr/local/zookeeper/data:/data \
-v /usr/local/zookeeper/logs:/logs \
-v /usr/local/zookeeper/data/myid:/data/myid \
-v /usr/local/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0

# kafka
#安装Kafka
docker pull wurstmeister/kafka
#创建日志目录
mkdir -p /data/modules/kafka/logs
mkdir -p /data/modules/kafka/config
cat > /data/modules/kafka/config/server.properties << "EOF"
############################# Server Basics #############################
broker.id=-1
############################# Socket Server Settings #############################
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
############################# Log Basics #############################
log.dirs=/kafka/kafka-logs-1
num.partitions=1
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings  #############################
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Retention Policy #############################
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
zookeeper.connect=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
group.initial.rebalance.delay.ms=0
port=9092
advertised.host.name=10.0.0.22
advertised.port=9092
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.0.0.22:9092
EOF
# Create the Kafka container.
# NOTE(review): the original used "--network=es-net --ip=10.10.10.42", but no
# network named es-net is ever created on this node, so "docker run" would
# fail. Use the default bridge network, consistent with node-1 and node-3
# (a static --ip is not supported on the default bridge anyway).
docker run -d --name kafka \
--publish 9092:9092 \
--network=bridge \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.22 \
--env KAFKA_ADVERTISED_PORT=9092  \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1  \
wurstmeister/kafka

Node-3

# 安装docker
yum install -y wget sudo && sudo wget -P /tmp https://qn.kentcloud.top/shell/docker/docker.sh
sudo chmod +x /tmp/docker.sh && sudo /tmp/docker.sh
# Zookeeper
mkdir -p {/usr/local/zookeeper/data/,/usr/local/zookeeper/logs/,/usr/local/zookeeper/conf/}
# (myid) must equal this node's server.(id) entry in zoo.cfg — node-3 => 3
# (fixed "ehco" typo AND wrong id: the original wrote "2", but this host is
# declared as server.3=0.0.0.0 in zoo.cfg below, so myid must be 3)
echo "3" > /usr/local/zookeeper/data/myid
#写入配置文件
cat > /usr/local/zookeeper/conf/zoo.cfg << EOF
#集群节点间心跳检查间隔,单位是毫秒,后续所有和时间相关的配置都是该值的倍数,进行整数倍的配置,如4等于8000
tickTime=2000
#集群其他节点与Master通信完成的初始通信时间限制,这里代表10*2000
initLimit=10
#若Master节点在超过syncLimit*tickTime的时间还未收到响应,认为该节点宕机
syncLimit=5
#数据存放目录
dataDir=/data
#ZK日志文件存放路径
dataLogDir=/logs
#ZK服务端口
clientPort=2181
#单个客户端最大连接数限制,0代表不限制
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为0代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,
#A表示第几号服务器;
#B是IP;
#C是该服务器与leader通信端口;
#D是leader挂掉后重新选举所用通信端口;两个端口号可以随意
server.1=10.0.0.21:2888:3888
server.2=10.0.0.22:2888:3888
server.3=0.0.0.0:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创建容器
docker run -d \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=bridge \
--name zookeeper \
--privileged=true \
--restart always \
-v /usr/local/zookeeper/data:/data \
-v /usr/local/zookeeper/logs:/logs \
-v /usr/local/zookeeper/data/myid:/data/myid \
-v /usr/local/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0

# Kafka
#安装Kafka
docker pull wurstmeister/kafka
#创建日志目录
mkdir -p {/data/modules/kafka/logs/,/data/modules/kafka/config/}
cat > /data/modules/kafka/config/server.properties << "EOF"
############################# Server Basics #############################
broker.id=-1
############################# Socket Server Settings #############################
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
############################# Log Basics #############################
log.dirs=/kafka/kafka-logs-1
num.partitions=1
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings  #############################
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Retention Policy #############################
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
zookeeper.connect=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
group.initial.rebalance.delay.ms=0
port=9092
advertised.host.name=10.0.0.23
advertised.port=9092
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.0.0.23:9092
EOF
#创建容器
docker run -d --name kafka \
--publish 9092:9092 \
--network=bridge \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.23 \
--env KAFKA_ADVERTISED_PORT=9092  \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1  \
wurstmeister/kafka

KentBryce
29 声望1 粉丝