首页 > 代码库 > ELK5.3+Kafka集群配置
ELK5.3+Kafka集群配置
【一】资源准备
# 3台4C*8G, 安装Zookeeper、Kafka、Logstash——Broker(input: filebeat; output: Kafka)
10.101.2.23 10.101.2.24 10.101.2.25
# 2台4C*8G, 安装Logstash——Indexer(input: Kafka; output: Elasticsearch)
10.101.2.26 10.101.2.27
# 3台8C*16G, 安装Elasticsearch
10.101.2.28 10.101.2.29 10.101.2.30
# 2台2C*4G, 安装Kibana
10.101.2.31 10.101.2.32
# 安装包下载
elasticsearch-5.3.1.tar.gz
filebeat-5.3.1-linux-x86_64.tar.gz
jdk-8u131-linux-x64.tar.gz
kafka_2.12-0.10.2.0.tgz
kibana-5.3.1-linux-x86_64.tar.gz
logstash-5.3.1.tar.gz
node-v7.9.0-linux-x64.tar.gz
zookeeper-3.4.10.tar.gz
nginx-1.12.0.tar.gz
统一上传至服务器 /usr/local/src 目录下
【二】通用配置
# 配置hosts
vi /etc/hosts
10.101.2.23 vmserver2x23
10.101.2.24 vmserver2x24
10.101.2.25 vmserver2x25
10.101.2.26 vmserver2x26
10.101.2.27 vmserver2x27
10.101.2.28 vmserver2x28
10.101.2.29 vmserver2x29
10.101.2.30 vmserver2x30
10.101.2.31 vmserver2x31
10.101.2.32 vmserver2x32
# 配置ssh访问限制,假如有必要的话
vi /etc/hosts.allow
【三】安装Elasticsearch集群
# 系统环境
vi /etc/sysctl.conf
vm.max_map_count=262144
fs.file-max=65536
执行 sysctl -p 使配置生效
vi /etc/security/limits.conf #打开文件数
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
* - memlock unlimited
vi /etc/security/limits.d/90-nproc.conf
* soft nproc 2048
# 配置Java环境变量
cd /usr/local/src/
tar -xvf jdk-8u131-linux-x64.tar.gz
mv jdk1.8.0_131 /usr/share/
vi /etc/profile #在末尾添加下面3行后, 保存退出
export JAVA_HOME=/usr/share/jdk1.8.0_131
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
source /etc/profile 使得配置立即生效
# 解压elasticsearch
cd /usr/local/src
tar -xvf elasticsearch-5.3.1.tar.gz
mv elasticsearch-5.3.1 /usr/local
vi /usr/local/elasticsearch-5.3.1/bin/elasticsearch #设置ES_JAVA_OPTS参数
ES_JAVA_OPTS="-Xms8g -Xmx8g" # 去掉该行前的注释后, 一定要删除后面的这一串 ./bin/elasticsearch
# 新增elastic组及用户, 因为elasticsearch不允许root用户启动
groupadd elastic
useradd elastic -g elastic
passwd elastic # 设定用户密码
chown -R elastic:elastic /usr/local/elasticsearch-5.3.1/
# 配置elasticsearch.yml, 主要参数如下
cluster.name: bsd-elk
node.name: elk-2-30 # 每个节点不同
node.master: true
node.data: true
bootstrap.memory_lock: true
bootstrap.system_call_filter: false # centos7以下版本需要将这个参数设置为false
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.101.2.28:9300", "10.101.2.29:9300", "10.101.2.30:9300"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 60s # 网上大部分文章这个参数都写成了discovery.zen.ping.timeout
http.cors.enabled: true
http.cors.allow-origin: "*"
# 下载node-v7.9.0-linux-x64.tar.gz, 解压后mv到/usr/local/nodejs-7.9.0
chown -R elastic:elastic nodejs-7.9.0/
cd /usr/local/nodejs-7.9.0
ln -s /usr/local/nodejs-7.9.0/bin/node /usr/local/bin
ln -s /usr/local/nodejs-7.9.0/bin/npm /usr/local/bin
# 安装head插件, 5.x以上的elasticsearch暂没有找到离线安装的方法, 所以需要服务器开通外网访问权限
# 5.x版本是里程碑式的更新, 网上大部分的文章都是以前版本的插件安装
cd /usr/local/elasticsearch-5.3.1
git clone https://github.com/mobz/elasticsearch-head.git
如果没有git工具先安装, yum install git
cd elasticsearch-head
npm install -g grunt --registry=https://registry.npm.taobao.org # 安装grunt
npm install # 安装head
npm install grunt --save # 如果 node_modules/grunt/bin/目录下没有 grunt文件, 就执行以下该命令
vi Gruntfile.js 修改connect, 在options里添加本机IP hostname: '10.101.2.30',
cd /usr/local/elasticsearch-5.3.1
bin/elasticsearch -d # 启动elasticsearch
cd elasticsearch-head
node_modules/grunt/bin/grunt server & # 启动head插件
访问 http://10.101.2.30:9100
# 安装bigdesk插件
cd /usr/local/elasticsearch-5.3.1
git clone https://github.com/hlstudio/bigdesk
cd bigdesk/_site
python -m SimpleHTTPServer & # 启动bigdesk插件
访问 http://10.101.2.30:8000
另外两台机器(10.101.2.28 10.101.2.29)按此步骤同样配置, master和data节点的选择看实际情况, 我的3台全是混合节点
所有elasticsearch启动后, 访问head如果能看到3个节点的集群信息, 就可以了
【四】安装ZooKeeper集群
# zookeeper依赖java, java环境配置参照上面
# 解压zookeeper-3.4.10.tar.gz
cd /usr/local/src
tar -xvf zookeeper-3.4.10.tar.gz
mv zookeeper-3.4.10 /usr/local
mkdir /usr/local/zookeeper-3.4.10/data # 每个节点上创建一个数据存放目录
# 创建myid文件
echo 23 >/usr/local/zookeeper-3.4.10/data/myid # 10.101.2.23、24、25三台机器上myid的值分别放 23 24 25
# 配置zoo.cfg
cd /usr/local/zookeeper-3.4.10/conf/
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg # 主要参数如下
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.10/data
clientPort=2181
server.23=10.101.2.23:2888:3888
server.24=10.101.2.24:2888:3888
server.25=10.101.2.25:2888:3888
# 复制配置文件到其他节点
scp zoo.cfg root@ip:/usr/local/zookeeper-3.4.10/conf/
# 启动zookeeper集群
cd /usr/local/zookeeper-3.4.10/
bin/zkServer.sh start
bin/zkServer.sh status # 主节点会返回 Mode: leader, 从节点返回 Mode: follower
至此zookeeper集群配置完毕
【五】配置kafka集群
# 解压kafka_2.12-0.10.2.0.tgz, 创建数据目录
cd /usr/local
tar -xvf src/kafka_2.12-0.10.2.0.tgz
mkdir /usr/local/kafka_2.12-0.10.2.0/data
# 配置server.properties
cd /usr/local/kafka_2.12-0.10.2.0/config
vi server.properties # 主要参数如下
broker.id=23 # 10.101.2.23、24、25三台机器上id的值分别放 23 24 25
delete.topic.enable=true
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka_2.12-0.10.2.0/data
num.partitions=6
num.recovery.threads.per.data.dir=1
#log.flush.interval.messages=10000
#log.flush.interval.ms=1000
log.retention.hours=72
#log.retention.bytes=1073741824
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.101.2.23:2181,10.101.2.24:2181,10.101.2.25:2181
zookeeper.connection.timeout.ms=6000
# 复制配置文件到其他节点, 不要忘记修改 broker.id
scp server.properties root@ip:/usr/local/kafka_2.12-0.10.2.0/config/
# 启动kafka集群
cd /usr/local/kafka_2.12-0.10.2.0/
bin/kafka-server-start.sh config/server.properties > /dev/null &
# 友情赠送几条常用命令
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test # 创建topic
bin/kafka-topics.sh --list --zookeeper localhost:2181 # 查看已经创建的topic列表
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test # 查看topic的详细信息
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test # 发送消息, 回车后输入一些消息进行模拟
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test # 消费消息, 可以换到其他kafka节点, 同步接收生产节点发送的消息
bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic test --partitions 6 # 给topic增加分区
bin/kafka-topics.sh --delete --zookeeper localhost:2181 --topic test1 # 删除已经创建的topic, 前提是开了delete.topic.enable=true参数
如果还不能删除, 可以到zookeeper中去干掉它
cd /usr/local/zookeeper-3.4.10/
bin/zkCli.sh
ls /brokers/topics # 查看topic
rmr /brokers/topics/test1 # 删除topic
至此kafka集群配置完毕
【六】logstash——broker集群配置
# java环境配置略
# 解压logstash-5.3.1.tar.gz
cd /usr/local
tar -xvf src/logstash-5.3.1.tar.gz
# 添加配置文件 beat_to_kafka.conf
cd logstash-5.3.1
vi config/beat_to_kafka.conf # 输入下面内容后保存
input {
beats {
port => 5044
}
}
filter {
}
# topic_id改成按beat中配置的document_type来输出到不同的topic中, 供kibana分组过滤用
output {
kafka {
bootstrap_servers => "10.101.2.23:9092,10.101.2.24:9092,10.101.2.25:9092"
# topic_id => "bsd-log"
topic_id => "%{[type]}"
}
}
# 启动logstash
bin/logstash -f config/beat_to_kafka.conf > /dev/null &
至此logstash-broker集群配置完成
【七】在实际应用服务器上安装filebeat
# 解压filebeat-5.3.1-linux-x86_64.tar.gz
cd /usr/local/
tar -xvf src/filebeat-5.3.1-linux-x86_64.tar.gz
mv filebeat-5.3.1-linux-x86_64 filebeat-5.3.1
# 配置filebeat.yml文件, 先找一台drds试水
cd filebeat-5.3.1
vi filebeat.yml # 主要参数如下
#=========================== 文件正文开始 =============================
filebeat.prospectors:
-
input_type: log
paths:
- /home/admin/drds-server/3306/logs/rms/slow.log
- /home/admin/drds-server/3306/logs/engineering/slow.log
- /home/admin/drds-server/3306/logs/sc_file/slow.log
- /home/admin/drds-server/3306/logs/sc_user/slow.log
- /home/admin/drds-server/3306/logs/sc_order/slow.log
- /home/admin/drds-server/3306/logs/sc_inventory/slow.log
- /home/admin/drds-server/3306/logs/sc_marketing/slow.log
- /home/admin/drds-server/3306/logs/sc_message/slow.log
- /home/admin/drds-server/3306/logs/sc_channel/slow.log
#exclude_lines: ["^DBG"]
#include_lines: ['Exception','ERR_CODE']
#exclude_files: [".gz$"]
document_type: drds-slow
# 设定合并正则
multiline.pattern: ^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}
multiline.negate: true
multiline.match: after
# 单台机器上配置不同document_type
-
input_type: log
paths:
- /home/admin/drds-server/3306/logs/test/sql.log
document_type: drds-sql
multiline.pattern: ^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}
multiline.negate: true
multiline.match: after
#----------------------------- Logstash output --------------------------------
output.logstash:
# The Logstash hosts
hosts: ["10.101.2.23:5044","10.101.2.24:5044","10.101.2.25:5044"]
#=========================== 文件正文结束 =============================
# 启动filebeat
./filebeat -c filebeat.yml > /dev/null &
【八】配置logstash——indexer集群
# java环境配置略
# 解压logstash-5.3.1.tar.gz
cd /usr/local
tar -xvf src/logstash-5.3.1.tar.gz
# 添加配置文件 kafka_to_es.conf
cd logstash-5.3.1
vi config/kafka_to_es.conf # 输入下面内容后保存
# input里关于server以及topic的配置, 5.x和以前版本不同
input {
kafka {
bootstrap_servers => "10.101.2.23:9092,10.101.2.24:9092,10.101.2.25:9092"
group_id => "logstash"
topics => ["drds-sql","drds-slow","sc_user","sc_channel","sc_order","sc_inventory","sc_message","sc_file","sc_marketing","rms","scm","engineering"]
consumer_threads => 50
decorate_events => true
}
}
filter {
}
output {
elasticsearch {
hosts => ["10.101.2.28:9200","10.101.2.29:9200","10.101.2.30:9200"]
index => "logstash-%{+YYYY.MM.dd.hh}"
manage_template => true
template_overwrite => true
template_name => "drdsLogstash"
flush_size => 50000
idle_flush_time => 10
}
}
# 启动logstash
bin/logstash -f config/kafka_to_es.conf > /dev/null &
至此logstash-indexer集群配置完成, 不出意外, elasticsearch-head里应该已经有数据写入了
【九】配置kibana集群
# 解压kibana-5.3.1-linux-x86_64.tar.gz
cd /usr/local
tar -xvf src/kibana-5.3.1-linux-x86_64.tar.gz
mv kibana-5.3.1-linux-x86_64/ kibana-5.3.1
# 配置kibana.yml文件
cd kibana-5.3.1
vi config/kibana.yml #主要参数如下
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://10.101.2.28:9200" # 指到es集群master节点
# 启动kibana
bin/kibana > /dev/null &
# kibana对浏览器兼容性不好, 低版本的chrome和ie都不能访问, 显示正在加载
访问 http://ip:5601
# 另一台kibana节点按相同配置(可以把es.url指到另一个节点), kibana查询支持布尔运算符, 通配符等, 关键字要大写(如AND OR), 具体自行百度
【十】配置nginx代理
# 安装nginx需要的一些依赖包
yum -y install pcre-devel
yum -y install gd-devel
# 解压nginx-1.12.0.tar.gz
cd /usr/local/
tar -xvf src/nginx-1.12.0.tar.gz
# 安装nginx
cd nginx-1.12.0
./configure --prefix=/usr/local/nginx-1.12.0/ --conf-path=/usr/local/nginx-1.12.0/nginx.conf
make
make install
# 配置 nginx.conf 文件, 我们这边只做负载均衡, 随便设设就好
vi /usr/local/nginx-1.12.0/nginx.conf
worker_processes 1;
error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
keepalive_timeout 65;
upstream kibana {
server 10.101.2.31:5601;
server 10.101.2.32:5601;
}
server {
listen 15601;
server_name 10.101.2.31;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
proxy_pass http://kibana;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# 启动nginx
sbin/nginx
# 之后在浏览器上访问 http://nginx_ip:15601 即可
至此集群所有组件配置完成
ELK5.3+Kafka集群配置