Collecting Logs with Filebeat
Filebeat Introduction
Filebeat is Elastic's lightweight log shipper: it runs on each host, tails log files, and forwards the events to outputs such as Logstash, Redis, or Elasticsearch.
ELK Architecture
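The sections below exercise the following data flow, summarized from the configurations in this article (Kibana, the usual visualization layer of an ELK stack, is not configured here):

nginx / tomcat access logs -> Filebeat (elkstack03) -> file / Logstash (10.0.0.84:6666) / Redis (10.0.0.52:6379) -> Elasticsearch (10.0.0.81 / 10.0.0.82)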

Filebeat Installation and Deployment
# 1. Download (the command is omitted here; see the sketch after this block)
# 2. Install
[root@elkstack03 ~]# yum localinstall -y filebeat-5.6.16-x86_64.rpm
# 3. Back up the original configuration file
[root@elkstack03 ~]# cp /etc/filebeat/filebeat.yml{,.bak}
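A minimal sketch of the download step, assuming the RPM is pulled from Elastic's public artifact repository (the URL is not given in the original, so treat it as an assumption):
[root@elkstack03 ~]# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.6.16-x86_64.rpm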
Filebeat: Collecting Logs and Writing Them to a File
[root@elkstack03 ~]# vim /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/blog.zls.com_access_json.log
  # Lines to exclude from collection
  exclude_lines: ["^DBG", "^$"]
  # Log type
  document_type: blog_ngx_log
- input_type: log
  paths:
    - /var/log/nginx/www.zls.com_access_json.log
  # Lines to exclude from collection
  exclude_lines: ["^DBG", "^$"]
  # Log type
  document_type: www_ngx_log

output.file:
  path: "/tmp"
  filename: "zls_filebeat.txt"

# Start filebeat
[root@elkstack03 ~]# systemctl start filebeat
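To confirm events are being written, check the output file (a verification step not in the original; Filebeat may append rotation suffixes to the filename):
[root@elkstack03 ~]# ls -l /tmp/zls_filebeat*
[root@elkstack03 ~]# tail -f /tmp/zls_filebeat.txt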


Filebeat: Shipping Logs from Files to Logstash
[root@elkstack03 ~]# vi /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/blog.zls.com_access_json.log
  exclude_lines: ["^DBG", "^$"]
  document_type: ngx_zls

output.logstash:
  # Logstash server address; multiple addresses are allowed
  hosts: ["10.0.0.84:6666"]
  # Whether to enable output to Logstash; defaults to true
  enabled: true
  # Number of worker threads
  worker: 1
  # Compression level
  compression_level: 3
  # Enable load balancing when there are multiple outputs
  # loadbalance: true

[root@elkstack03 ~]# systemctl start filebeat
[root@elkstack03 ~]# ps -ef|grep filebeat
root 27833 1 0 09:39 ? 00:00:00 /usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat
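Before restarting after any future edit, the configuration can also be validated first (not shown in the original; the -configtest flag belongs to the Filebeat 5.x series, so treat the exact flag as an assumption):
[root@elkstack03 ~]# filebeat -c /etc/filebeat/filebeat.yml -configtest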
[root@elkstack04 ~]# vim /etc/logstash/conf.d/beats_es.conf
input {
  beats {
    port => "6666"
    codec => "json"
  }
}
filter {
  json {
    source => "message"
    remove_field => ["message"]
  }
}
output {
  elasticsearch {
    hosts => ["10.0.0.81:9200"]
    index => "%{type}-%{+yyyy.MM.dd}"
    codec => "json"
  }
}
[root@elkstack04 ~]# echo 'PATH="/usr/share/logstash/bin:$PATH"' > /etc/profile.d/logstash.sh
[root@elkstack04 ~]# source /etc/profile
[root@elkstack04 ~]# logstash -f /etc/logstash/conf.d/beats_es.conf
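The pipeline can be syntax-checked before it is started (a verification step not in the original; -t is Logstash's --config.test_and_exit flag):
[root@elkstack04 ~]# logstash -f /etc/logstash/conf.d/beats_es.conf -t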

Filebeat: Shipping Logs Directly to ES
[root@elkstack03 ~]# vim /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/blog.zls.com_access_json.log
  exclude_lines: ["^DBG", "^$"]
  document_type: ngx_zls

output.elasticsearch:
  hosts: ["10.0.0.81:9200"]
  index: "nginx_es-%{+yyyy.MM.dd}"

## Make filebeat collect the logs from the beginning again
# 1. Stop the filebeat service
[root@elkstack03 ~]# systemctl stop filebeat
# 2. Delete the registry file
[root@elkstack03 ~]# rm -f /var/lib/filebeat/registry
# 3. Start filebeat
[root@elkstack03 ~]# systemctl start filebeat
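To confirm documents are arriving, the index can be queried directly (a verification step not in the original; the date suffix follows the index pattern above):
[root@elkstack03 ~]# curl -s '10.0.0.81:9200/nginx_es-*/_count?pretty'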


Using Filebeat to Ship Logs from Files to Redis
[root@elkstack03 ~]# vi /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/blog.zls.com_access_json.log
  exclude_lines: ["^DBG", "^$"]
  document_type: blog.zls.com_json
- input_type: log
  paths:
    - /var/log/nginx/www.zls.com_access_json.log
  exclude_lines: ["^DBG", "^$"]
  document_type: www.zls.com_json

output.redis:
  hosts: ["10.0.0.52:6379"]
  # Name of the key in Redis
  key: "nginx_log"
  # Use database 6
  db: 6
  # Timeout in seconds
  timeout: 5
  # Redis password
  # password: zls

[root@elkstack03 ~]# systemctl stop filebeat
[root@elkstack03 ~]# rm -f /var/lib/filebeat/registry
[root@elkstack03 ~]# systemctl start filebeat
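Before pointing Logstash at Redis, confirm that events are being queued in the list (verification commands not in the original):
[root@elkstack03 ~]# redis-cli -h 10.0.0.52 -n 6 LLEN nginx_log
[root@elkstack03 ~]# redis-cli -h 10.0.0.52 -n 6 LRANGE nginx_log 0 0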

[root@elkstack04 ~]# vim /etc/logstash/conf.d/ngx_redis_es.conf
input {
  redis {
    data_type => "list"
    key => "nginx_log"
    host => "10.0.0.52"
    port => "6379"
    db => "6"
    codec => "json"
  }
}
filter {
  json {
    source => "message"
    remove_field => ["message"]
  }
}
output {
  elasticsearch {
    hosts => ["10.0.0.82:9200"]
    index => "%{type}-%{+yyyy.MM.dd}"
  }
}
[root@elkstack04 ~]# logstash -f /etc/logstash/conf.d/ngx_redis_es.conf
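Once Logstash drains the Redis list, one index per document_type should appear in Elasticsearch (a verification step not in the original):
[root@elkstack04 ~]# curl -s '10.0.0.82:9200/_cat/indices?v' | grep zls.com_json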

Filebeat: Sending Collected Logs to Multiple Outputs
[root@elkstack03 conf.d]# vim /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /usr/local/nginx/logs/access_json.log
  # Lines to exclude from collection
  exclude_lines: ["^DBG", "^$"]
  # Log type
  document_type: ngx_log
- input_type: log
  paths:
    - /usr/local/tomcat/logs/tomcat_access_log.*.log
  # Lines to exclude from collection
  exclude_lines: ["^DBG", "^$"]
  # Log type
  document_type: tc_log

output.redis:
  # Redis server address; multiple addresses are allowed
  hosts: ["10.0.0.54:6379"]
  key: "tn"
  db: 2
  timeout: 5
  password: zls

output.file:
  path: "/tmp"
  filename: "zls.txt"
  # Number of worker threads
  worker: 1
  # Compression level
  compression_level: 3
  # Enable load balancing when there are multiple outputs
  loadbalance: true
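Filebeat 5.x allows more than one output block to be enabled at the same time (later 6.x releases dropped this), so both destinations receive the events. A verification sketch after restarting (commands not in the original):
[root@elkstack03 conf.d]# systemctl restart filebeat
[root@elkstack03 conf.d]# ls -l /tmp/zls.txt*
[root@elkstack03 conf.d]# redis-cli -h 10.0.0.54 -a zls -n 2 LLEN tn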
