Dear all,
I am using logs-to-elasticsearch.bro to send Bro logs to Elasticsearch, but it is not working.
ES error logs:
[2016-03-25 17:30:52,957][DEBUG][action.bulk ] [node-1] [whbro-201603251500][1] failed to execute bulk item (index) index {[whbro-201603251500][dns][AVOtHLQHooGOx5uLgLSQ], source[{"_timestamp":1458898236411,"ts":1458898206267,"uid":"ClbNI74bIcRQ8Gs6Wc","id.orig_h":"10.100.78.88","id.orig_p":137,"id.resp_h":"10.100.79.255","id.resp_p":137,"proto":"udp","trans_id":47282,"query":"ISATAP","qclass":1,"qclass_name":"C_INTERNET","qtype":32,"qtype_name":"NB","AA":false,"TC":false,"RD":true,"RA":false,"Z":1,"rejected":false}]}
MapperParsingException[Field [_timestamp] is a metadata field and cannot be added inside a document. Use the index API request parameters.]
at org.elasticsearch.index.mapper.DocumentParser.parseObject(DocumentParser.java:213)
at org.elasticsearch.index.mapper.DocumentParser.innerParseDocument(DocumentParser.java:131)
at org.elasticsearch.index.mapper.DocumentParser.parseDocument(DocumentParser.java:79)
at org.elasticsearch.index.mapper.DocumentMapper.parse(DocumentMapper.java:304)
at org.elasticsearch.index.shard.IndexShard.prepareCreate(IndexShard.java:500)
at org.elasticsearch.index.shard.IndexShard.prepareCreateOnPrimary(IndexShard.java:481)
at org.elasticsearch.action.index.TransportIndexAction.prepareIndexOperationOnPrimary(TransportIndexAction.java:214)
at org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary(TransportIndexAction.java:223)
at org.elasticsearch.action.bulk.TransportShardBulkAction.shardIndexOperation(TransportShardBulkAction.java:326)
at org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:119)
at org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:68)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryPhase.doRun(TransportReplicationAction.java:595)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:263)
at org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler.messageReceived(TransportReplicationAction.java:260)
at org.elasticsearch.transport.TransportService$4.doRun(TransportService.java:350)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Bro config file:
/usr/local/bro/lib/bro/plugins/Bro_ElasticSearch/scripts/init.bro
module LogElasticSearch;

export {
	## Destination for the ES logs. Valid options are
	## "direct" to directly connect to ES and "nsq" to
	## transfer the logs into an nsqd instance.
	const destination = "direct" &redef;

	## Name of the ES cluster.
	const cluster_name = "my-application" &redef;

	## ES server.
	const server_host = "10.100.79.10" &redef;

	## ES port.
	const server_port = 9200 &redef;

	## Name of the ES index.
	const index_prefix = "testooo" &redef;

	## Should the index names be in UTC or in local time?
	## Setting this to true would be more compatible with Kibana and other tools.
	const index_name_in_utc = F &redef;

	## Format for the index names.
	## Setting this to "%Y.%m.%d-%H" would be more compatible with Kibana and other tools.
	#const index_name_fmt = "%Y%m%d" &redef;
	const index_name_fmt = "%Y%m%d%H%M" &redef;

	## The ES type prefix comes before the name of the related log.
	## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc.
	const type_prefix = "" &redef;

	## The time before an ElasticSearch transfer will timeout. Note that
	## the fractional part of the timeout will be ignored. In particular,
	## time specifications less than a second result in a timeout value of
	## 0, which means "no timeout."
	const transfer_timeout = 2secs;

	## The batch size is the number of messages that will be queued up before
	## they are sent to be bulk indexed.
	const max_batch_size = 1000 &redef;

	## The maximum amount of wall-clock time that is allowed to pass without
	## finishing a bulk log send. This represents the maximum delay you
	## would like to have with your logs before they are sent to ElasticSearch.
	const max_batch_interval = 1min &redef;

	## The maximum byte size for a buffered JSON string to send to the bulk
	## insert API.
	const max_byte_size = 1024 * 1024 &redef;

	## If the "nsq" destination is given, this is the topic
	## that Bro will push logs into.
	const nsq_topic = "bro_logs" &redef;
}