Logstash: writing logs to Kafka and reading them back from Kafka

Collect nginx logs and ship them to Kafka

First change the nginx log format to JSON: [modifying the nginx log format](https://blog.51cto.com/9025736/2373483)
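The point of that change is to make nginx write the access log as JSON, so that the `codec => json` in the file input below can parse each line directly. A minimal sketch of such a `log_format` (the field names here are illustrative, not necessarily the ones used in the linked post):

# goes inside the http { } block of nginx.conf
log_format access_json '{"@timestamp":"$time_iso8601",'
                       '"clientip":"$remote_addr",'
                       '"host":"$host",'
                       '"url":"$uri",'
                       '"status":"$status",'
                       '"size":$body_bytes_sent,'
                       '"responsetime":$request_time,'
                       '"referer":"$http_referer",'
                       '"agent":"$http_user_agent"}';
access_log /data/wwwlogs/access_nginx.log access_json;

With nginx writing JSON to /data/wwwlogs/access_nginx.log, the Logstash pipeline that tails the log files and pushes them into Kafka looks like this: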
input {
  file {
    type => "nginx-access"
    path => "/data/wwwlogs/access_nginx.log"
    start_position => "beginning"
    codec => json
  }
  file {
    path => "/var/log/messages"
    start_position => "beginning"
    type => "system-log-252"
  }
}
output {
  if [type] == "nginx-access" {
    kafka {
      bootstrap_servers => "192.168.1.252:9092"   # Kafka broker address
      topic_id => "252nginx-accesslog"
      batch_size => 5
      codec => "json"   # write events as JSON, since Logstash represents each event as a JSON document
    }
  }
  if [type] == "system-log-252" {
    kafka {
      bootstrap_servers => "192.168.1.252:9092"
      topic_id => "system-log-252"
      batch_size => 5
      codec => "json"   # write events as JSON here as well
    }
  }
}
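To confirm that events are actually arriving in Kafka, you can consume a few messages with the console tools that ship with Kafka. A quick check, assuming the Kafka CLI scripts are on the PATH of the broker host (on Kafka 2.2+ `kafka-topics.sh` accepts `--bootstrap-server`; older releases use `--zookeeper` instead):

# list the topics the Logstash output should have created
kafka-topics.sh --list --bootstrap-server 192.168.1.252:9092

# read back a few of the JSON events written by Logstash
kafka-console-consumer.sh \
  --bootstrap-server 192.168.1.252:9092 \
  --topic 252nginx-accesslog \
  --from-beginning \
  --max-messages 5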

Configure Logstash to read the logs back from Kafka

input {
  kafka {
    bootstrap_servers => "192.168.1.252:9092"   # Kafka broker address
    topics => ["252nginx-accesslog"]
    codec => "json"   # decode the JSON that the producer pipeline wrote to Kafka
    group_id => "252nginx-access-log"
    consumer_threads => 1
    decorate_events => true   # attach Kafka metadata (topic, partition, offset) to each event
  }
  kafka {
    bootstrap_servers => "192.168.1.252:9092"
    topics => ["system-log-252"]
    consumer_threads => 1
    decorate_events => true
    codec => "json"
  }
}
output {
  # the type field set by the producer pipeline is preserved in the JSON,
  # so the conditionals test those original values
  if [type] == "nginx-access" {
    elasticsearch {
      hosts => ["192.168.1.252:9200"]
      index => "252nginx-accesslog-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "system-log-252" {
    elasticsearch {
      hosts => ["192.168.1.252:9200"]
      index => "system-log-1512-%{+YYYY.MM.dd}"
    }
  }
}
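Before starting this pipeline you can syntax-check it, and once it is running the indices should show up in Elasticsearch. A quick way to verify, assuming Elasticsearch listens on 192.168.1.252:9200 as configured above (the Logstash install path and the config file name below are only illustrative):

# syntax-check the pipeline file without starting Logstash
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/kafka-to-es.conf --config.test_and_exit

# list the indices created by the elasticsearch output
curl -s 'http://192.168.1.252:9200/_cat/indices?v'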
