-
Notifications
You must be signed in to change notification settings - Fork 5k
Add support for parsers in filestream input #24763
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
4003476
9ef72eb
57f9b72
b9f93ae
097b76f
ab5df90
446cae9
0664c8b
245c625
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,89 @@ | ||
| // Licensed to Elasticsearch B.V. under one or more contributor | ||
| // license agreements. See the NOTICE file distributed with | ||
| // this work for additional information regarding copyright | ||
| // ownership. Elasticsearch B.V. licenses this file to you under | ||
| // the Apache License, Version 2.0 (the "License"); you may | ||
| // not use this file except in compliance with the License. | ||
| // You may obtain a copy of the License at | ||
| // | ||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||
| // | ||
| // Unless required by applicable law or agreed to in writing, | ||
| // software distributed under the License is distributed on an | ||
| // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
| // KIND, either express or implied. See the License for the | ||
| // specific language governing permissions and limitations | ||
| // under the License. | ||
|
|
||
| // +build integration | ||
|
|
||
| package filestream | ||
|
|
||
// Sample Elasticsearch server log output used as fixtures by the filestream
// parser integration tests. The entries contain multiline Java stack traces
// (MergeMappingException) so the multiline parser can be exercised against
// realistic input.
// NOTE(review): the original column-alignment whitespace inside the log
// headers may have been collapsed by the rendering — verify against the
// upstream test file before relying on exact byte offsets.
const (
	// elasticsearchMultilineLogs is a full Elasticsearch startup log with
	// several multiline stack traces interleaved with single-line entries.
	elasticsearchMultilineLogs = `[2015-12-06 01:44:16,735][INFO ][node ] [Zach] version[2.0.0], pid[48553], build[de54438/2015-10-22T08:09:48Z]
[2015-12-06 01:44:16,736][INFO ][node ] [Zach] initializing ...
[2015-12-06 01:44:16,804][INFO ][plugins ] [Zach] loaded [], sites []
[2015-12-06 01:44:16,941][INFO ][env ] [Zach] using [1] data paths, mounts [[/ (/dev/disk1)]], net usable_space [66.3gb], net total_space [232.6gb], spins? [unknown], types [hfs]
[2015-12-06 01:44:19,177][INFO ][node ] [Zach] initialized
[2015-12-06 01:44:19,177][INFO ][node ] [Zach] starting ...
[2015-12-06 01:44:19,356][INFO ][transport ] [Zach] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}, {[fe80::1]:9300}, {[::1]:9300}
[2015-12-06 01:44:19,367][INFO ][discovery ] [Zach] elasticsearch/qfPw9z0HQe6grbJQruTCJQ
[2015-12-06 01:44:22,405][INFO ][cluster.service ] [Zach] new_master {Zach}{qfPw9z0HQe6grbJQruTCJQ}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-join(elected_as_master, [0] joins received)
[2015-12-06 01:44:22,432][INFO ][http ] [Zach] publish_address {127.0.0.1:9200}, bound_addresses {127.0.0.1:9200}, {[fe80::1]:9200}, {[::1]:9200}
[2015-12-06 01:44:22,432][INFO ][node ] [Zach] started
[2015-12-06 01:44:22,446][INFO ][gateway ] [Zach] recovered [0] indices into cluster_state
[2015-12-06 01:44:52,882][INFO ][cluster.metadata ] [Zach] [filebeat-2015.12.06] creating index, cause [auto(bulk api)], templates [], shards [5]/[1], mappings [process, system]
[2015-12-06 01:44:53,256][INFO ][cluster.metadata ] [Zach] [filebeat-2015.12.06] update_mapping [process]
[2015-12-06 01:44:53,269][DEBUG][action.admin.indices.mapping.put] [Zach] failed to put mappings on indices [[filebeat-2015.12.06]], type [process]
MergeMappingException[Merge failed with failures {[mapper [proc.mem.rss_p] of different type, current_type [long], merged_type [double]]}]
at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:388)
at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:388)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:225)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:188)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2015-12-06 01:44:53,274][DEBUG][action.bulk ] [Zach] [filebeat-2015.12.06][0] failed to execute bulk item (index) index {[filebeat-2015.12.06][process][AVF0v5vcVA0hoJdODMTz], source[{"@timestamp":"2015-12-06T00:44:52.448Z","beat":{"hostname":"ruflin","name":"ruflin"},"count":1,"proc":{"cpu":{"user":1902,"user_p":0,"system":941,"total":2843,"start_time":"Dec03"},"mem":{"size":3616309248,"rss":156405760,"rss_p":0.01,"share":0},"name":"Google Chrome H","pid":40572,"ppid":392,"state":"running"},"type":"process"}]}
MergeMappingException[Merge failed with failures {[mapper [proc.mem.rss_p] of different type, current_type [long], merged_type [double]]}]
at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:388)
at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:388)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:225)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:188)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2015-12-06 01:44:53,279][DEBUG][action.admin.indices.mapping.put] [Zach] failed to put mappings on indices [[filebeat-2015.12.06]], type [process]
MergeMappingException[Merge failed with failures {[mapper [proc.mem.rss_p] of different type, current_type [long], merged_type [double], mapper [proc.cpu.user_p] of different type, current_type [long], merged_type [double]]}]
at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:388)
at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:388)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:225)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:188)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2015-12-06 01:44:53,280][DEBUG][action.bulk ] [Zach] [filebeat-2015.12.06][1] failed to execute bulk item (index) index {[filebeat-2015.12.06][process][AVF0v5vbVA0hoJdODMTj], source[{"@timestamp":"2015-12-06T00:44:52.416Z","beat":{"hostname":"ruflin","name":"ruflin"},"count":1,"proc":{"cpu":{"user":6643,"user_p":0.01,"system":693,"total":7336,"start_time":"01:44"},"mem":{"size":5182656512,"rss":248872960,"rss_p":0.01,"share":0},"name":"java","pid":48553,"ppid":48547,"state":"running"},"type":"process"}]}
MergeMappingException[Merge failed with failures {[mapper [proc.mem.rss_p] of different type, current_type [long], merged_type [double], mapper [proc.cpu.user_p] of different type, current_type [long], merged_type [double]]}]
at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:388)
at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:388)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:225)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:188)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2015-12-06 01:44:53,334][INFO ][cluster.metadata ] [Zach] [filebeat-2015.12.06] update_mapping [system]
[2015-12-06 01:44:53,646][INFO ][cluster.metadata ] [Zach] [filebeat-2015.12.06] create_mapping [filesystem]
`

	// elasticsearchMultilineLongLogs is a shorter variant of the fixture
	// above: a single multiline stack trace framed by single-line entries.
	elasticsearchMultilineLongLogs = `[2015-12-06 01:44:16,735][INFO ][node ] [Zach] version[2.0.0], pid[48553], build[de54438/2015-10-22T08:09:48Z]
[2015-12-06 01:44:53,269][DEBUG][action.admin.indices.mapping.put] [Zach] failed to put mappings on indices [[filebeat-2015.12.06]], type [process]
MergeMappingException[Merge failed with failures {[mapper [proc.mem.rss_p] of different type, current_type [long], merged_type [double]]}]
at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:388)
at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:388)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:225)
at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:188)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
[2015-12-06 01:44:53,646][INFO ][cluster.metadata ] [Zach] [filebeat-2015.12.06] create_mapping [filesystem]
`
)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -57,6 +57,8 @@ type filestream struct { | |
| encodingFactory encoding.EncodingFactory | ||
| encoding encoding.Encoding | ||
| closerConfig closerConfig | ||
| parserConfig []common.ConfigNamespace | ||
| msgPostProc []postProcesser | ||
| } | ||
|
|
||
| // Plugin creates a new filestream input plugin for creating a stateful input. | ||
|
|
@@ -216,8 +218,16 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, path stri | |
| } | ||
|
|
||
| r = readfile.NewStripNewline(r, inp.readerConfig.LineTerminator) | ||
|
|
||
| r, err = newParsers(r, parserConfig{maxBytes: inp.readerConfig.MaxBytes, lineTerminator: inp.readerConfig.LineTerminator}, inp.readerConfig.Parsers) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
|
|
||
| r = readfile.NewLimitReader(r, inp.readerConfig.MaxBytes) | ||
|
urso marked this conversation as resolved.
Outdated
|
||
|
|
||
| inp.msgPostProc = newPostProcessors(inp.readerConfig.Parsers) | ||
|
|
||
| return r, nil | ||
| } | ||
|
|
||
|
|
@@ -369,16 +379,24 @@ func (inp *filestream) eventFromMessage(m reader.Message, path string) beat.Even | |
| }, | ||
| } | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I wonder if we can/should inject a processor at the beginning? E.g. if we have filebeat collect the output of another filebeat, shall we really overwrite the existing fields with the new ones we define here, or should we make an attempt to keep the original fields from the original log file intact if they are present? |
||
| fields.DeepUpdate(m.Fields) | ||
| m.Fields = fields | ||
|
|
||
| for _, proc := range inp.msgPostProc { | ||
| proc.PostProcess(&m) | ||
| } | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why do we need to build and apply the post processors separately? Couldn't we just ensure that the parser correctly modifies the
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, the fields |
||
|
|
||
| if len(m.Content) > 0 { | ||
| if fields == nil { | ||
| fields = common.MapStr{} | ||
| if m.Fields == nil { | ||
| m.Fields = common.MapStr{} | ||
| } | ||
| if _, ok := m.Fields["message"]; !ok { | ||
| m.Fields["message"] = string(m.Content) | ||
| } | ||
| fields["message"] = string(m.Content) | ||
| } | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For the complete function body up until here I wonder, if this could be a separate parser we just append to the list of parsers? In that case the |
||
|
|
||
| return beat.Event{ | ||
| Timestamp: m.Ts, | ||
| Fields: fields, | ||
| Meta: m.Meta, | ||
| Fields: m.Fields, | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,140 @@ | ||
| // Licensed to Elasticsearch B.V. under one or more contributor | ||
| // license agreements. See the NOTICE file distributed with | ||
| // this work for additional information regarding copyright | ||
| // ownership. Elasticsearch B.V. licenses this file to you under | ||
| // the Apache License, Version 2.0 (the "License"); you may | ||
| // not use this file except in compliance with the License. | ||
| // You may obtain a copy of the License at | ||
| // | ||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||
| // | ||
| // Unless required by applicable law or agreed to in writing, | ||
| // software distributed under the License is distributed on an | ||
| // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
| // KIND, either express or implied. See the License for the | ||
| // specific language governing permissions and limitations | ||
| // under the License. | ||
|
|
||
| package filestream | ||
|
|
||
| import ( | ||
| "errors" | ||
| "fmt" | ||
| "io" | ||
|
|
||
| "github.com/elastic/beats/v7/libbeat/common" | ||
| "github.com/elastic/beats/v7/libbeat/reader" | ||
| "github.com/elastic/beats/v7/libbeat/reader/multiline" | ||
| "github.com/elastic/beats/v7/libbeat/reader/readfile" | ||
| "github.com/elastic/beats/v7/libbeat/reader/readjson" | ||
| ) | ||
|
|
||
var (
	// ErrNoSuchParser is returned when a parser name in the configuration
	// does not match any supported parser (see newParsers).
	ErrNoSuchParser = errors.New("no such parser")
)
|
|
||
// parser transforms or translates the Content attribute of a Message.
// Parsers are able to aggregate two or more Messages into a single one
// (e.g. joining the lines of a stack trace into one event).
type parser interface {
	io.Closer
	// Next returns the next parsed message, or an error when no more
	// messages can be produced.
	Next() (reader.Message, error)
}
|
urso marked this conversation as resolved.
Outdated
|
||
|
|
||
// parserConfig carries the reader settings that individual parsers need at
// construction time (newParsers passes maxBytes to the multiline parser;
// lineTerminator is not used by the code visible here — confirm before
// removing).
type parserConfig struct {
	maxBytes       int
	lineTerminator readfile.LineTerminator
}
|
|
||
// postProcesser is applied to a Message after it has been read, letting a
// parser adjust the message fields before the event is built (the ndjson
// parser is the only implementation created by newPostProcessors).
// NOTE(review): the identifier is misspelled ("postProcesser" instead of
// "postProcessor"); renaming would touch every call site, so it is kept
// as-is here.
type postProcesser interface {
	PostProcess(*reader.Message)
	Name() string
}
|
|
||
| func newParsers(in reader.Reader, pCfg parserConfig, c []common.ConfigNamespace) (parser, error) { | ||
|
kvch marked this conversation as resolved.
Outdated
|
||
| p := in | ||
|
|
||
| parserCheck := make(map[string]int) | ||
| for _, ns := range c { | ||
| name := ns.Name() | ||
| switch name { | ||
| case "multiline": | ||
| parserCheck["multiline"]++ | ||
| var config multiline.Config | ||
| cfg := ns.Config() | ||
| err := cfg.Unpack(&config) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("error while parsing multiline parser config: %+v", err) | ||
| } | ||
| p, err = multiline.New(p, "\n", pCfg.maxBytes, &config) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("error while creating multiline parser: %+v", err) | ||
| } | ||
| case "ndjson": | ||
| parserCheck["ndjson"]++ | ||
| var config readjson.Config | ||
| cfg := ns.Config() | ||
| err := cfg.Unpack(&config) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("error while parsing ndjson parser config: %+v", err) | ||
| } | ||
| p = readjson.NewJSONReader(p, &config) | ||
| default: | ||
| return nil, fmt.Errorf("%s: %s", ErrNoSuchParser, name) | ||
| } | ||
| } | ||
|
|
||
| // This is a temporary check. In the long run configuring multiple parsers with the same | ||
| // type is going to be supported. | ||
| if count, ok := parserCheck["multiline"]; ok && count > 1 { | ||
| return nil, fmt.Errorf("only one parser is allowed for multiline, got %d", count) | ||
| } | ||
| if count, ok := parserCheck["ndjson"]; ok && count > 1 { | ||
| return nil, fmt.Errorf("only one parser is allowed for ndjson, got %d", count) | ||
| } | ||
|
|
||
| return p, nil | ||
| } | ||
|
|
||
| func newPostProcessors(c []common.ConfigNamespace) []postProcesser { | ||
| var pp []postProcesser | ||
| for _, ns := range c { | ||
| name := ns.Name() | ||
| switch name { | ||
| case "ndjson": | ||
| var config readjson.Config | ||
| cfg := ns.Config() | ||
| cfg.Unpack(&config) | ||
| pp = append(pp, readjson.NewJSONPostProcessor(&config)) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. what happens to post processing if I have multiple JSON parsers? E.g.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In this case, multiple post processors are added. It means that e.g. if There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm more curious what happens to the event if one has e.g. let's assume I have the configuration: with this event: then given the parsers I would assume my event will look like: What what will actually be produced is: Because the post processor removes |
||
| default: | ||
| continue | ||
| } | ||
| } | ||
|
|
||
| return pp | ||
| } | ||
|
|
||
| func validateParserConfig(pCfg parserConfig, c []common.ConfigNamespace) error { | ||
| for _, ns := range c { | ||
| name := ns.Name() | ||
| switch name { | ||
| case "multiline": | ||
| var config multiline.Config | ||
| cfg := ns.Config() | ||
| err := cfg.Unpack(&config) | ||
| if err != nil { | ||
| return fmt.Errorf("error while parsing multiline parser config: %+v", err) | ||
| } | ||
| case "ndjson": | ||
| var config readjson.Config | ||
| cfg := ns.Config() | ||
| err := cfg.Unpack(&config) | ||
| if err != nil { | ||
| return fmt.Errorf("error while parsing ndjson parser config: %+v", err) | ||
| } | ||
| default: | ||
| return fmt.Errorf("%s: %s", ErrNoSuchParser, name) | ||
| } | ||
| } | ||
|
|
||
| return nil | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.