
#Upload the logstash RPM package
[root@es-kibana tools]# ls -lh logstash-6.6.0.rpm
-rw-r--r-- 1 root root 163M Dec 5 01:03 logstash-6.6.0.rpm
#Install logstash
rpm -ivh logstash-6.6.0.rpm
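Logstash 6.x needs a Java 8 runtime; if `java -version` fails on this host, OpenJDK is one way to provide it (a hedged extra step, not part of the original walkthrough):
#only needed if no JVM is installed yet
java -version
yum install -y java-1.8.0-openjdk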
#Install redis from source
cd /tools
yum install -y wget net-tools gcc gcc-c++ make tar openssl openssl-devel cmake
cd /usr/local/src
wget 'http://download.redis.io/releases/redis-4.0.9.tar.gz'
tar -zxf redis-4.0.9.tar.gz
cd redis-4.0.9
make
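If make stops with a jemalloc-related error (common after an interrupted build), falling back to libc malloc usually gets the build through; this is an optional recovery step:
#only needed if the default build fails
make distclean
make MALLOC=libc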
mkdir -pv /usr/local/redis/conf /usr/local/redis/bin
cp src/redis* /usr/local/redis/bin/
cp redis.conf /usr/local/redis/conf
#Write the redis configuration file
cat >/usr/local/redis/conf/redis.conf<<'EOF'
daemonize yes
port 6379
logfile /var/log/redis.log
dir /tmp
dbfilename dump.rdb
bind 192.168.2.197 127.0.0.1
EOF
#Start redis
/usr/local/redis/bin/redis-server /usr/local/redis/conf/redis.conf
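A quick sanity check that redis is up and listening on the bound address (it should answer PONG):
/usr/local/redis/bin/redis-cli -h 192.168.2.197 -p 6379 ping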
#Start elasticsearch, kibana, and filebeat
systemctl start elasticsearch
systemctl start kibana
systemctl start filebeat
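Optionally make the services start at boot and confirm they came up cleanly (assuming the systemd units installed by the RPM packages):
systemctl enable elasticsearch kibana filebeat
systemctl status elasticsearch kibana filebeat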
#Configure filebeat to ship the nginx access log into redis
cat >/etc/filebeat/filebeat.yml<<'EOF'
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]

setup.kibana:
  host: "192.168.2.197:5601"

output.redis:
  hosts: ["192.168.2.197"]
  port: 6379
  key: "filebeat"
  db: 0
  timeout: 5
EOF
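The json.* options above assume nginx already writes access.log as JSON. A minimal log_format sketch for that (field names are illustrative; upstream_time and request_time match the fields the logstash filter converts later; escape=json needs nginx 1.11.8 or newer):
#inside the http {} block of /etc/nginx/nginx.conf
log_format json escape=json '{"time_local":"$time_local",'
                            '"remote_addr":"$remote_addr",'
                            '"request":"$request",'
                            '"status":"$status",'
                            '"request_time":"$request_time",'
                            '"upstream_time":"$upstream_response_time"}';
access_log /var/log/nginx/access.log json;
#reload nginx after changing the log format
nginx -s reload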
#Restart filebeat
systemctl restart filebeat
#Check the cached data in redis
[root@es-kibana redis-4.0.9]# /usr/local/redis/bin/redis-cli -h 192.168.2.197 -p 6379
192.168.2.197:6379> KEYS *
1) "filebeat" key的名称
192.168.2.197:6379> type filebeat
list                   the type is a list
192.168.2.197:6379> LLEN filebeat
(integer) 16           16 cached entries
192.168.2.197:6379> LRANGE filebeat 1 16
(list indexes start at 0, so LRANGE filebeat 0 -1 would print every cached entry)
#Generate test data with ab
ab -n 20000 -c 20 http://192.168.2.197/
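If ab is not available, it ships with the httpd-tools package on CentOS:
yum install -y httpd-tools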
#After generating traffic the list grows: a check partway through shows 2064 entries, and a later check shows 40016
192.168.2.197:6379> LLEN filebeat
(integer) 2064
192.168.2.197:6379> LLEN filebeat
(integer) 40016
40016 entries are now cached in redis
#Write the logstash pipeline configuration
cat >/etc/logstash/conf.d/redis.conf<<'EOF'
input {
  # read events from the redis list that filebeat fills
  redis {
    host => "192.168.2.197"
    port => "6379"
    db => "0"
    key => "filebeat"
    data_type => "list"
  }
}

filter {
  # store the nginx timing fields as numbers so they can be aggregated in Kibana
  mutate {
    convert => ["upstream_time", "float"]
    convert => ["request_time", "float"]
  }
}

output {
  # print events to the console for debugging and index them into elasticsearch
  stdout {}
  elasticsearch {
    hosts => "http://192.168.2.197:9200"
    manage_template => false
    index => "nginx_access-%{+yyyy.MM}"
  }
}
EOF
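Before the first run, the pipeline file can be syntax-checked with logstash's -t (--config.test_and_exit) flag:
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf -t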
#Start logstash (in the foreground for the first run)
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf
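Running in the foreground is handy for watching the stdout {} output; once the pipeline works, it can instead be run in the background via the systemd unit the RPM installs, which loads every file under /etc/logstash/conf.d/:
systemctl enable logstash
systemctl start logstash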
#The redis list is being drained by logstash
[root@es-kibana ~]# /usr/local/redis/bin/redis-cli -h 192.168.2.197 -p 6379
192.168.2.197:6379> LLEN filebeat
(integer) 39266
192.168.2.197:6379> LLEN filebeat
(integer) 39141
192.168.2.197:6379> LLEN filebeat
(integer) 39016
192.168.2.197:6379> LLEN filebeat
(integer) 27141
192.168.2.197:6379> LLEN filebeat
(integer) 27016
192.168.2.197:6379> LLEN filebeat
(integer) 26891
Redis is holding tens of thousands of entries, and every event logstash reads is removed from the list.
Filebeat is the producer, pushing log data into redis;
logstash is the consumer, reading it back out, with redis acting as the buffer in between.
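The new index can also be confirmed from the shell before opening Kibana (standard _cat API on the default port):
curl -s 'http://192.168.2.197:9200/_cat/indices?v' | grep nginx_access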
#Check the result in the Kibana web UI

