Skip to content

Instantly share code, notes, and snippets.

@yangl
Last active May 5, 2017 01:37
Show Gist options
  • Save yangl/9790a9c4287a1f9654b7 to your computer and use it in GitHub Desktop.
Save yangl/9790a9c4287a1f9654b7 to your computer and use it in GitHub Desktop.
Logstash相关,用了这个可以把flume扔了哈 https://www.elastic.co/guide/en/logstash/5.4/plugins-outputs-elasticsearch.html
# File inputs: tail the metrics / error log files and tag each event with a
# "type" field that downstream stages use for routing (Kafka topic selection).
# FIX: the original config declared the metrics-business input twice with the
# identical path glob; two file inputs tailing the same files is at best
# redundant and can emit duplicate events — the extra copy was removed.
input {
file {
path => ["/data/metricslog/alive/*.log"]
start_position => "beginning"
type => "metrics-alive"
}
file {
path => ["/data/metricslog/tp/*.log"]
start_position => "beginning"
type => "metrics-tp"
}
file {
# ** matches the per-service subdirectory, e.g. nginx/das/metrics.access.log
path => ["/data/metricslog/nginx/**/metrics.access.log"]
start_position => "beginning"
type => "metrics-nginx"
}
file {
path => ["/data/metricslog/business/*.log"]
start_position => "beginning"
type => "metrics-business"
}
file {
path => ["/data/dblog/**/*-db.log.*"]
start_position => "beginning"
type => "double-write-error-log"
}
}
# Sampling filter: randomly drop ~90% of the high-volume nginx / tp metric
# streams so only ~10% of those events reach Kafka.
filter {
# BUG FIX: the original condition tested for types "nginx" and "tp", but the
# file inputs tag events as "metrics-nginx" / "metrics-tp", so the condition
# never matched and 100% of those events were forwarded unsampled.
if [type] in ["metrics-nginx", "metrics-tp"] {
drop {
percentage => 90
}
}
}
# Forward every event to Kafka; the destination topic is chosen per event
# from its "type" field (set by the file inputs above).
output {
kafka {
# Emit only the original raw log line — no Logstash JSON envelope.
codec => plain{
format => "%{message}"
}
# Production hosts mapping (resolved via /etc/hosts):
# 1. Youxin datacenter
# 10.10.82.180 KAFKA_HOST_0
# 10.10.30.207 KAFKA_HOST_1
# 10.10.81.103 KAFKA_HOST_2
# 2. Aliyun datacenter
# 192.168.3.3 KAFKA_HOST_11
# 192.168.3.10 KAFKA_HOST_12
# 192.168.3.11 KAFKA_HOST_13
bootstrap_servers => "KAFKA_HOST_0:9092,KAFKA_HOST_1:9092,KAFKA_HOST_2:9092,KAFKA_HOST_11:9092,KAFKA_HOST_12:9092,KAFKA_HOST_13:9092"
# topic_id => "metrics-%{type}"
# One topic per input type, e.g. "metrics-alive", "metrics-tp".
topic_id => "%{type}"
# Possible acks values: "0" (none), "1" (leader only), "all" (full ISR).
# "1" trades a little durability for throughput.
acks => "1"
workers => 15
}
}
# JDBC inputs: poll three MySQL tables every 5 seconds and emit one event per
# changed row, using an incremental tracking column so only rows updated since
# the last run are fetched (:sql_last_value is persisted in the
# last_run_metadata_path file between runs).
# NOTE(review): the database user/password are committed in plaintext here —
# secrets should be moved out of version-controlled config.
input {
jdbc {
jdbc_connection_string => "jdbc:mysql://xxxxxxxxxx:3306/uxinlive?connectTimeout=10000&autoReconnect=true&allowMultiQueries=true&useUnicode=true&zeroDateTimeBehavior=convertToNull"
jdbc_user => "root"
jdbc_password => "uxin.com"
jdbc_driver_library => "/home/hongbing/elk/mysql-connector-java-5.1.39.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_paging_enabled => "true"
# rufus-scheduler cron with a seconds field: fires every 5 seconds.
schedule => "*/5 * * * * *"
last_run_metadata_path => "/home/hongbing/elk/logstash-5.0.0/.room_info_logstash_jdbc_last_run"
# Changed room_info rows, left-joined with the owning user's nickname /
# avatar / source so the ES document is denormalized.
statement => "select t1.*,t2.nickname,t2.head_portrait_url,t2.source from (select * from room_info where update_time > :sql_last_value) t1 LEFT join (select id,nickname,head_portrait_url,source from user_info where id in (select uid from room_info where update_time > :sql_last_value)) t2 on t1.uid =t2.id"
use_column_value => "true"
tracking_column => "update_time"
type => "room_info"
}
jdbc {
jdbc_connection_string => "jdbc:mysql://xxxxxxxxxx:3306/uxinlive?connectTimeout=10000&autoReconnect=true&allowMultiQueries=true&useUnicode=true&zeroDateTimeBehavior=convertToNull"
jdbc_user => "root"
jdbc_password => "uxin.com"
jdbc_driver_library => "/home/hongbing/elk/mysql-connector-java-5.1.39.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_paging_enabled => "true"
schedule => "*/5 * * * * *"
last_run_metadata_path => "/home/hongbing/elk/logstash-5.0.0/.user_info_logstash_jdbc_last_run"
tracking_column => "update_timestamp"
use_column_value => "true"
# Adds a constant fans_cnt=0 column — presumably filled in later by another
# process; verify against the consumer before changing.
statement => "select * , 0 as fans_cnt from user_info where update_timestamp > :sql_last_value"
type => "user_info"
}
jdbc {
jdbc_connection_string => "jdbc:mysql://xxxxxxxxxx:3306/uxinlive?connectTimeout=10000&autoReconnect=true&allowMultiQueries=true&useUnicode=true&zeroDateTimeBehavior=convertToNull"
jdbc_user => "root"
jdbc_password => "uxin.com"
# NOTE(review): unlike the two inputs above, this one uses relative paths
# for the driver jar and metadata file, so it depends on Logstash's working
# directory — consider making these absolute for consistency.
jdbc_driver_library => "./mysql-connector-java-5.1.39.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_paging_enabled => "true"
schedule => "*/5 * * * * *"
last_run_metadata_path => ".question_info_logstash_jdbc_last_run"
tracking_column => "update_time"
use_column_value => "true"
statement => "select * from question_info where update_time > :sql_last_value"
type => "question_info"
}
}
# Normalize fields before indexing: comma-separated tag strings become real
# arrays, and every document gets a default "score" of 0.
# NOTE(review): this filter applies to ALL three jdbc event types; the splits
# presumably only matter for documents that actually carry "tags" /
# "source_tags" fields — verify against the table schemas.
filter {
mutate {
split => { "tags" => "," }
split => { "source_tags" => "," }
add_field => { score => 0 }
}
}
# Route each jdbc event type to its Elasticsearch index as an upsert:
# action=update + doc_as_upsert=true inserts the document if the id is new,
# otherwise partially updates the existing one, keyed on the table's
# primary key so re-polled rows overwrite rather than duplicate.
output {
if [type] == "room_info" {
elasticsearch {
action => "update"
doc_as_upsert => true
hosts => ["localhost:9200"]
index => "uxinlive"
document_type => "%{type}"
# room_info is keyed by room_id (not id, unlike the other two types).
document_id => "%{room_id}"
}
}
if [type] == "user_info" {
elasticsearch {
action => "update"
doc_as_upsert => true
hosts => ["localhost:9200"]
# user_info goes to its own "hongdou" index, not "uxinlive".
index => "hongdou"
document_type => "%{type}"
document_id => "%{id}"
}
}
if [type] == "question_info" {
elasticsearch {
action => "update"
doc_as_upsert => true
hosts => ["localhost:9200"]
index => "uxinlive"
document_type => "%{type}"
document_id => "%{id}"
}
}
# Debug sink: echo every event to stdout as one JSON document per line.
stdout {
codec => json_lines
}
}
1.创建目录:(将das改为自己的业务名)
mkdir -p /data/metricslog/nginx/das/
2.在业务tomcat前面的nginx上定义自己业务的log_format,如下:(将das改为自己的业务名)
log_format metricsDas '{"logtype":"NGINX","system":"das","time":"$time_iso8601","upstreamAddr":"$upstream_addr","uri":"$uri","upstreamResponseTime":"$upstream_response_time","status":"$status"}';
3.在自己业务的server配置中添加如下:(把路径中的das改为自己的业务名,日志format改为自己的日志格式)
access_log /data/weblog/es.access.log main; ##原有日志
access_log /data/metricslog/nginx/das/metrics.access.log metricsDas; ## 新增nginx访问性能监控日志
./sbin/nginx -t
./sbin/nginx -s reload
到/data/metricslog/nginx/das/目录下查看是否已有上边配置的日志文件
4.把logstash安装包从163的机子上拷贝到/data/soft目录下并解压:
scp -P60086 [email protected]:/home/KevinYang/logstash.tgz /data/soft/
tar zxvf logstash.tgz
cd logstash && sh start.sh
export JAVA_HOME=/data/soft/jdk8
export PATH=$JAVA_HOME/bin:$PATH
pgrep -f logstash|xargs kill -9
sleep 2
nohup ./bin/logstash agent --verbose --auto-reload --reload-interval 60 -f logstash.conf > logstash.log 2>&1 &
@aserunix
Copy link

aserunix commented Feb 14, 2017

你好, 请问这个,性能如何?

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment