diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 4944d9f47e18..1c73e56a5ecf 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -26,6 +26,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Index template's default_fields setting is only populated with ECS fields. {pull}28596[28596] {issue}28215[28215]
 - Remove deprecated `--template` and `--ilm-policy` flags. Use `--index-management` instead. {pull}28870[28870]
 - Remove options `logging.files.suffix` and default to datetime endings. {pull}28927[28927]
+- Remove Journalbeat. Use the `journald` input of Filebeat instead. {pull}29131[29131]
+- The `include_matches` option of the `journald` input no longer accepts a list of strings. {pull}29294[29294]
 - Add job.name in pods controlled by Jobs {pull}28954[28954]

 *Auditbeat*
@@ -148,6 +150,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Add documentation for add_kubernetes_metadata processors `log_path` matcher. {pull}28868[28868]
 - Add support for parsers on journald input {pull}29070[29070]
 - Add support in httpjson input for oAuth2ProviderDefault of password grant_type. {pull}29087[29087]
+- Add support for filtering in journald input with `units`, `syslog_identifiers`, `transports` and `include_matches`. {pull}29294[29294]
 - Add new `userAgent` and `beatInfo` template functions for httpjson input {pull}29528[29528]

 *Heartbeat*
diff --git a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
index a1c7166cac06..010e5e36e2fd 100644
--- a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
+++ b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
@@ -566,10 +566,21 @@ filebeat.inputs:
   #id: service-foo

   # You may wish to have separate inputs for each service. You can use
-  # include_matches to specify a list of filter expressions that are
+  # include_matches.or to specify a list of filter expressions that are
   # applied as a logical OR. You may specify filter
-  #include_matches:
-  #- _SYSTEMD_UNIT=foo.service
+  #include_matches.or:
+  #- match:
+  #  - _SYSTEMD_UNIT=foo.service
+
+  # List of syslog identifiers
+  #syslog_identifiers: ["audit"]
+
+  # Collect events from the service and messages about the service,
+  # including coredumps.
+  #units: ["docker.service"]
+
+  # The list of transports (_TRANSPORT field of journald entries)
+  #transports: ["audit"]

   # Parsers are also supported, here is an example of the multiline
   # parser.
diff --git a/filebeat/docs/inputs/input-journald.asciidoc b/filebeat/docs/inputs/input-journald.asciidoc
index 32343def2926..bbc4211b0c51 100644
--- a/filebeat/docs/inputs/input-journald.asciidoc
+++ b/filebeat/docs/inputs/input-journald.asciidoc
@@ -24,8 +24,8 @@ journal.
 ----

 You may wish to have separate inputs for each service. You can use
-`include_matches` to specify a list of filter expressions that are applied as a
-logical OR. A good way to list the journald fields that are available for
+`include_matches` to specify filtering expressions.
+A good way to list the https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html[journald fields] that are available for
 filtering messages is to run `journalctl -o json` to output logs and metadata as
 JSON. This example collects logs from the `vault.service` systemd unit.

@@ -34,7 +34,7 @@ JSON. This example collects logs from the `vault.service` systemd unit.
 {beatname_lc}.inputs:
 - type: journald
   id: service-vault
-  include_matches:
+  include_matches.match:
   - _SYSTEMD_UNIT=vault.service
 ----

@@ -48,7 +48,7 @@ possible.
 {beatname_lc}.inputs:
 - type: journald
   id: iptables
-  include_matches:
+  include_matches.match:
   - _TRANSPORT=kernel
   processors:
   - drop_event:
@@ -133,14 +133,64 @@ If you have old log files and want to skip lines, start {beatname_uc} with
 `seek: tail` specified. Then stop {beatname_uc}, set `seek: cursor`, and restart
 {beatname_uc}.

+[float]
+[id="{beatname_lc}-input-{type}-units"]
+==== `units`
+
+Read only the entries of the units specified in this option. The matched entries
+include messages from the units themselves, messages about the units from
+authorized daemons, and coredumps of the units. However, this option does not
+match systemd user units.
+
+[float]
+[id="{beatname_lc}-input-{type}-syslog-identifiers"]
+==== `syslog_identifiers`
+
+Read only the entries with the selected syslog identifiers.
+
+[float]
+[id="{beatname_lc}-input-{type}-transports"]
+==== `transports`
+
+Read only the entries received over the specified transports. Example: syslog.
+
+Valid transports:
+
+* audit: messages from the kernel audit subsystem
+* driver: internally generated messages
+* syslog: messages received via the local syslog socket with the syslog protocol
+* journal: messages received via the native journal protocol
+* stdout: messages from a service's standard output or error output
+* kernel: messages from the kernel
+
 [float]
 [id="{beatname_lc}-input-{type}-include-matches"]
 ==== `include_matches`

-A list of filter expressions used to match fields. The format of the expression
+A collection of filter expressions used to match fields. The format of the expression
 is `field=value`. {beatname_uc} fetches all events that exactly match the
 expressions. Pattern matching is not supported.

+If you configure a filter expression, only entries matching that expression are
+read by the journald reader of {beatname_uc}.
+If the filter expressions apply to different fields, only entries matching all
+of the expressions are read.
+If they apply to the same field, only entries where the field takes one of the
+specified values are read.
+
+* `match`: A list of filter expressions to match fields.
+* `or`: The filter expressions listed under `or` are combined with a disjunction (OR).
+* `and`: The filter expressions listed under `and` are combined with a conjunction (AND).
+
+Note that these expressions are limited: you can combine matches with `and` and
+`or`, but arbitrarily nested logical expressions are not supported.
+
+The following `include_matches` configuration reads all `systemd` syslog entries:
+
+["source","yaml",subs="attributes"]
+----
+include_matches.and:
+- match:
+  - "journald.process.name=systemd"
+  - "systemd.transport=syslog"
+----
+
 To reference fields, use one of the following:

 * The field name used by the systemd journal. For example,
diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml
index 912a8f9bcb62..87a50f91a084 100644
--- a/filebeat/filebeat.reference.yml
+++ b/filebeat/filebeat.reference.yml
@@ -973,10 +973,21 @@ filebeat.inputs:
   #id: service-foo

   # You may wish to have separate inputs for each service. You can use
-  # include_matches to specify a list of filter expressions that are
+  # include_matches.or to specify a list of filter expressions that are
   # applied as a logical OR. You may specify filter
-  #include_matches:
-  #- _SYSTEMD_UNIT=foo.service
+  #include_matches.or:
+  #- match:
+  #  - _SYSTEMD_UNIT=foo.service
+
+  # List of syslog identifiers
+  #syslog_identifiers: ["audit"]
+
+  # Collect events from the service and messages about the service,
+  # including coredumps.
+  #units: ["docker.service"]
+
+  # The list of transports (_TRANSPORT field of journald entries)
+  #transports: ["audit"]

   # Parsers are also supported, here is an example of the multiline
   # parser.
diff --git a/filebeat/input/journald/config.go b/filebeat/input/journald/config.go
index cb3b32e14c97..4e1c0b66da4d 100644
--- a/filebeat/input/journald/config.go
+++ b/filebeat/input/journald/config.go
@@ -48,7 +48,16 @@ type config struct {
 	CursorSeekFallback journalread.SeekMode `config:"cursor_seek_fallback"`

 	// Matches store the key value pairs to match entries.
-	Matches []journalfield.Matcher `config:"include_matches"`
+	Matches journalfield.IncludeMatches `config:"include_matches"`
+
+	// Units stores the units to monitor.
+	Units []string `config:"units"`
+
+	// Transports stores the list of transports to include in the messages.
+	Transports []string `config:"transports"`
+
+	// Identifiers stores the syslog identifiers to watch.
+	Identifiers []string `config:"syslog_identifiers"`

 	// SaveRemoteHostname defines if the original source of the entry needs to be saved.
 	SaveRemoteHostname bool `config:"save_remote_hostname"`
diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go
index e9e2f0b40c48..41b6c649f90d 100644
--- a/filebeat/input/journald/input.go
+++ b/filebeat/input/journald/input.go
@@ -43,7 +43,10 @@ type journald struct {
 	MaxBackoff         time.Duration
 	Seek               journalread.SeekMode
 	CursorSeekFallback journalread.SeekMode
-	Matches            []journalfield.Matcher
+	Matches            journalfield.IncludeMatches
+	Units              []string
+	Transports         []string
+	Identifiers        []string
 	SaveRemoteHostname bool
 	Parsers            parser.Config
 }
@@ -105,6 +108,9 @@ func configure(cfg *common.Config) ([]cursor.Source, cursor.Input, error) {
 		Seek:               config.Seek,
 		CursorSeekFallback: config.CursorSeekFallback,
 		Matches:            config.Matches,
+		Units:              config.Units,
+		Transports:         config.Transports,
+		Identifiers:        config.Identifiers,
 		SaveRemoteHostname: config.SaveRemoteHostname,
 		Parsers:            config.Parsers,
 	}, nil
@@ -156,7 +162,8 @@ func (inp *journald) Run(
 func (inp *journald) open(log *logp.Logger, canceler input.Canceler, src cursor.Source) (*journalread.Reader, error) {
 	backoff := backoff.NewExpBackoff(canceler.Done(), inp.Backoff, inp.MaxBackoff)

-	reader, err := journalread.Open(log, src.Name(), backoff, withFilters(inp.Matches))
+	reader, err := journalread.Open(log, src.Name(), backoff,
+		withFilters(inp.Matches), withUnits(inp.Units), withTransports(inp.Transports), withSyslogIdentifiers(inp.Identifiers))
 	if err != nil {
 		return nil, sderr.Wrap(err, "failed to create reader for %{path} journal", src.Name())
 	}
@@ -184,9 +191,27 @@ func initCheckpoint(log *logp.Logger, c cursor.Cursor) checkpoint {
 	return cp
 }

-func withFilters(filters []journalfield.Matcher) func(*sdjournal.Journal) error {
+func withFilters(filters journalfield.IncludeMatches) func(*sdjournal.Journal) error {
 	return func(j *sdjournal.Journal) error {
-		return journalfield.ApplyMatchersOr(j, filters)
+		return journalfield.ApplyIncludeMatches(j, filters)
 	}
 }
+
+func withUnits(units []string) func(*sdjournal.Journal) error {
+	return func(j *sdjournal.Journal) error {
+		return journalfield.ApplyUnitMatchers(j, units)
+	}
+}
+
+func withTransports(transports []string) func(*sdjournal.Journal) error {
+	return func(j *sdjournal.Journal) error {
+		return journalfield.ApplyTransportMatcher(j, transports)
+	}
+}
+
+func withSyslogIdentifiers(identifiers []string) func(*sdjournal.Journal) error {
+	return func(j *sdjournal.Journal) error {
+		return journalfield.ApplySyslogIdentifierMatcher(j, identifiers)
+	}
+}
diff --git a/filebeat/input/journald/input_filtering_test.go b/filebeat/input/journald/input_filtering_test.go
new file mode 100644
index 000000000000..75718b3e5861
--- /dev/null
+++ b/filebeat/input/journald/input_filtering_test.go
@@ -0,0 +1,219 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux && cgo && withjournald
+// +build linux,cgo,withjournald
+
+package journald
+
+import (
+	"context"
+	"path"
+	"testing"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+)
+
+func TestInputSyslogIdentifier(t *testing.T) {
+	tests := map[string]struct {
+		identifiers      []string
+		expectedMessages []string
+	}{
+		"one identifier": {
+			identifiers: []string{"sudo"},
+			expectedMessages: []string{
+				"pam_unix(sudo:session): session closed for user root",
+			},
+		},
+		"two identifiers": {
+			identifiers: []string{"sudo", "systemd"},
+			expectedMessages: []string{
+				"pam_unix(sudo:session): session closed for user root",
+				"Started Outputs some log lines.",
+			},
+		},
+	}
+
+	for name, testCase := range tests {
+		t.Run(name, func(t *testing.T) {
+			env := newInputTestingEnvironment(t)
+			inp := env.mustCreateInput(common.MapStr{
+				"paths":              []string{path.Join("testdata", "input-multiline-parser.journal")},
+				"syslog_identifiers": testCase.identifiers,
+			})
+
+			ctx, cancelInput := context.WithCancel(context.Background())
+			env.startInput(ctx, inp)
+			defer cancelInput()
+
+			env.waitUntilEventCount(len(testCase.expectedMessages))
+
+			for idx, event := range env.pipeline.clients[0].GetEvents() {
+				if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected {
+					t.Fatalf("expecting event message %q, got %q", expected, got)
+				}
+			}
+		})
+	}
+}
+
+func TestInputUnits(t *testing.T) {
+	tests := map[string]struct {
+		units            []string
+		kernel           bool
+		expectedMessages []string
+	}{
+		"one unit": {
+			units: []string{"session-1.scope"},
+			expectedMessages: []string{
+				"pam_unix(sudo:session): session closed for user root",
+			},
+		},
+		"one unit with kernel": {
+			units:  []string{"session-1.scope"},
+			kernel: true,
+			expectedMessages: []string{
+				"pam_unix(sudo:session): session closed for user root",
+			},
+		},
+		"two units, all messages": {
+			units: []string{"session-1.scope", "user@1000.service"},
+			expectedMessages: []string{
+				"pam_unix(sudo:session): session closed for user root",
+				"Started Outputs some log lines.",
+				"1st line",
+				"2nd line",
+				"3rd line",
+				"4th line",
+				"5th line",
+				"6th line",
+			},
+		},
+	}
+
+	for name, testCase := range tests {
+		t.Run(name, func(t *testing.T) {
+			env := newInputTestingEnvironment(t)
+			inp := env.mustCreateInput(common.MapStr{
+				"paths":  []string{path.Join("testdata", "input-multiline-parser.journal")},
+				"units":  testCase.units,
+				"kernel": testCase.kernel,
+			})
+
+			ctx, cancelInput := context.WithCancel(context.Background())
+			env.startInput(ctx, inp)
+			defer cancelInput()
+
+			env.waitUntilEventCount(len(testCase.expectedMessages))
+
+			for idx, event := range env.pipeline.clients[0].GetEvents() {
+				if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected {
+					t.Fatalf("expecting event message %q, got %q", expected, got)
+				}
+			}
+		})
+	}
+}
+
+func TestInputIncludeMatches(t *testing.T) {
+	tests := map[string]struct {
+		includeMatches   map[string]interface{}
+		expectedMessages []string
+	}{
+		"single match condition": {
+			includeMatches: map[string]interface{}{
+				"match": []string{
+					"syslog.facility=3",
+				},
+			},
+			expectedMessages: []string{
+				"Started Outputs some log lines.",
+				"1st line",
+				"2nd line",
+				"3rd line",
+				"4th line",
+				"5th line",
+				"6th line",
+			},
+		},
+		"multiple match condition": {
+			includeMatches: map[string]interface{}{
+				"match": []string{
+					"journald.process.name=systemd",
+					"syslog.facility=3",
+				},
+			},
+			expectedMessages: []string{
+				"Started Outputs some log lines.",
+			},
+		},
+		"and condition": {
+			includeMatches: map[string]interface{}{
+				"and": []map[string]interface{}{
+					map[string]interface{}{
+						"match": []string{
+							"syslog.facility=3",
+							"message=6th line",
+						},
+					},
+				},
+			},
+			expectedMessages: []string{
+				"6th line",
+			},
+		},
+		"or condition": {
+			includeMatches: map[string]interface{}{
+				"or": []map[string]interface{}{
+					map[string]interface{}{
+						"match": []string{
+							"message=5th line",
+							"message=6th line",
+						},
+					},
+				},
+			},
+			expectedMessages: []string{
+				"5th line",
+				"6th line",
+			},
+		},
+	}
+
+	for name, testCase := range tests {
+		t.Run(name, func(t *testing.T) {
+			env := newInputTestingEnvironment(t)
+			inp := env.mustCreateInput(common.MapStr{
+				"paths":           []string{path.Join("testdata", "input-multiline-parser.journal")},
+				"include_matches": testCase.includeMatches,
+			})
+
+			ctx, cancelInput := context.WithCancel(context.Background())
+			env.startInput(ctx, inp)
+			defer cancelInput()
+
+			env.waitUntilEventCount(len(testCase.expectedMessages))
+
+			for idx, event := range env.pipeline.clients[0].GetEvents() {
+				if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected {
+					t.Fatalf("expecting event message %q, got %q", expected, got)
+				}
+			}
+		})
+	}
+
+}
diff --git a/filebeat/input/journald/input_parsers_test.go b/filebeat/input/journald/input_parsers_test.go
index 6aadb031cd2c..3c6d1ad5780e 100644
--- a/filebeat/input/journald/input_parsers_test.go
+++ b/filebeat/input/journald/input_parsers_test.go
@@ -36,8 +36,8 @@ func TestInputParsers(t *testing.T) {
 	env := newInputTestingEnvironment(t)

 	inp := env.mustCreateInput(common.MapStr{
-		"paths":           []string{path.Join("testdata", "input-multiline-parser.journal")},
-		"include_matches": []string{"_SYSTEMD_USER_UNIT=log-service.service"},
+		"paths":                 []string{path.Join("testdata", "input-multiline-parser.journal")},
+		"include_matches.match": []string{"_SYSTEMD_USER_UNIT=log-service.service"},
 		"parsers": []common.MapStr{
 			{
 				"multiline": common.MapStr{
diff --git a/filebeat/input/journald/pkg/journalfield/default.go b/filebeat/input/journald/pkg/journalfield/default.go
index 7573eb4156df..a72df0ce5c0b 100644
--- a/filebeat/input/journald/pkg/journalfield/default.go
+++ b/filebeat/input/journald/pkg/journalfield/default.go
@@ -46,6 +46,7 @@ var journaldEventFields = FieldConversion{
 	"_UDEV_DEVLINK": text("journald.kernel.device_symlinks"),
 	"_UDEV_DEVNODE": text("journald.kernel.device_node_path"),
 	"_UDEV_SYSNAME": text("journald.kernel.device_name"),
+	"UNIT":          text("journald.unit"),
 	sdjournal.SD_JOURNAL_FIELD_AUDIT_LOGINUID: integer("journald.audit.login_uid"),
 	sdjournal.SD_JOURNAL_FIELD_AUDIT_SESSION:  text("journald.audit.session"),
 	sdjournal.SD_JOURNAL_FIELD_BOOT_ID:        text("journald.host.boot_id"),
@@ -60,6 +61,7 @@ var journaldEventFields = FieldConversion{
 	sdjournal.SD_JOURNAL_FIELD_HOSTNAME:        text("host.hostname"),
 	sdjournal.SD_JOURNAL_FIELD_MACHINE_ID:      text("host.id"),
 	sdjournal.SD_JOURNAL_FIELD_MESSAGE:         text("message"),
+	sdjournal.SD_JOURNAL_FIELD_MESSAGE_ID:      text("message_id"),
 	sdjournal.SD_JOURNAL_FIELD_PID:             integer("journald.pid"),
 	sdjournal.SD_JOURNAL_FIELD_PRIORITY:        integer("syslog.priority", "log.syslog.priority"),
 	sdjournal.SD_JOURNAL_FIELD_SYSLOG_FACILITY: integer("syslog.facility", "log.syslog.facility.code"),
diff --git a/filebeat/input/journald/pkg/journalfield/matcher.go b/filebeat/input/journald/pkg/journalfield/matcher.go
index 0227b2aae832..49d4e98fe1a7 100644
--- a/filebeat/input/journald/pkg/journalfield/matcher.go
+++ b/filebeat/input/journald/pkg/journalfield/matcher.go
@@ -36,12 +36,26 @@ type MatcherBuilder struct {
 	Conversions map[string]Conversion
 }

+// IncludeMatches stores the advanced matching configuration
+// provided by the user.
+type IncludeMatches struct {
+	Matches []Matcher        `config:"match"`
+	AND     []IncludeMatches `config:"and"`
+	OR      []IncludeMatches `config:"or"`
+}
+
 type journal interface {
 	AddMatch(string) error
 	AddDisjunction() error
+	AddConjunction() error
 }

-var defaultBuilder = MatcherBuilder{Conversions: journaldEventFields}
+var (
+	defaultBuilder = MatcherBuilder{Conversions: journaldEventFields}
+	coreDumpMsgID  = MustBuildMatcher("message_id=fc2e22bc6ee647b6b90729ab34a250b1") // matcher for messages from coredumps
+	journaldUID    = MustBuildMatcher("journald.uid=0")                              // matcher for messages from root (UID 0)
+	journaldPID    = MustBuildMatcher("journald.pid=1")                              // matcher for messages from init process (PID 1)
+)

 // Build creates a new Matcher using the configured conversion table.
 // If no table has been configured the internal default table will be used.
@@ -73,6 +87,14 @@ func BuildMatcher(in string) (Matcher, error) {
 	return defaultBuilder.Build(in)
 }

+// MustBuildMatcher builds a Matcher from the given expression and panics if the expression is invalid.
+func MustBuildMatcher(in string) Matcher {
+	m, err := BuildMatcher(in)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 // IsValid returns true if the matcher was initialized correctly.
 func (m Matcher) IsValid() bool { return m.str != "" }

@@ -118,3 +140,125 @@ func ApplyMatchersOr(j journal, matchers []Matcher) error {

 	return nil
 }
+
+// ApplyUnitMatchers adds unit based filtering to the journal reader.
+// Filtering is similar to what systemd does here:
+// https://github.com/systemd/systemd/blob/641e2124de6047e6010cd2925ea22fba29b25309/src/shared/logs-show.c#L1409-L1455
+func ApplyUnitMatchers(j journal, units []string) error {
+	for _, unit := range units {
+		systemdUnit, err := BuildMatcher("systemd.unit=" + unit)
+		if err != nil {
+			return fmt.Errorf("failed to build matcher for _SYSTEMD_UNIT: %+w", err)
+		}
+		coredumpUnit, err := BuildMatcher("journald.coredump.unit=" + unit)
+		if err != nil {
+			return fmt.Errorf("failed to build matcher for COREDUMP_UNIT: %+w", err)
+		}
+		journaldUnit, err := BuildMatcher("journald.unit=" + unit)
+		if err != nil {
+			return fmt.Errorf("failed to build matcher for UNIT: %+w", err)
+		}
+		journaldObjectUnit, err := BuildMatcher("journald.object.systemd.unit=" + unit)
+		if err != nil {
+			return fmt.Errorf("failed to build matcher for OBJECT_SYSTEMD_UNIT: %+w", err)
+		}
+
+		matchers := [][]Matcher{
+			// match for the messages of the service
+			[]Matcher{
+				systemdUnit,
+			},
+			// match for the coredumps of the service
+			[]Matcher{
+				coreDumpMsgID,
+				journaldUID,
+				coredumpUnit,
+			},
+			// match for messages about the service with PID value of 1
+			[]Matcher{
+				journaldPID,
+				journaldUnit,
+			},
+			// match for messages about the service from authorized daemons
+			[]Matcher{
+				journaldUID,
+				journaldObjectUnit,
+			},
+		}
+		if strings.HasSuffix(unit, ".slice") {
+			sliceMatcher, err := BuildMatcher("systemd.slice=" + unit)
+			if err != nil {
+				return fmt.Errorf("failed to build matcher for _SYSTEMD_SLICE: %+w", err)
+			}
+			matchers = append(matchers, []Matcher{sliceMatcher})
+		}
+
+		for _, m := range matchers {
+			if err := ApplyMatchersOr(j, m); err != nil {
+				return fmt.Errorf("error while setting up unit matcher for %s: %+v", unit, err)
+			}
+		}
+
+	}
+
+	return nil
+
+}
+
+// ApplyTransportMatcher adds matchers for the configured transports.
+func ApplyTransportMatcher(j journal, transports []string) error {
+	if len(transports) == 0 {
+		return nil
+	}
+
+	transportMatchers := make([]Matcher, len(transports))
+	for i, transport := range transports {
+		transportMatcher, err := BuildMatcher("_TRANSPORT=" + transport)
+		if err != nil {
+			return err
+		}
+		transportMatchers[i] = transportMatcher
+	}
+	if err := ApplyMatchersOr(j, transportMatchers); err != nil {
+		return fmt.Errorf("error while adding %+v transport to matchers: %+v", transports, err)
+	}
+	return nil
+
+}
+
+// ApplySyslogIdentifierMatcher adds syslog identifier filtering to the journal reader.
+func ApplySyslogIdentifierMatcher(j journal, identifiers []string) error {
+	identifierMatchers := make([]Matcher, len(identifiers))
+	for i, identifier := range identifiers {
+		identifierMatchers[i] = MustBuildMatcher("syslog.identifier=" + identifier)
+	}
+
+	return ApplyMatchersOr(j, identifierMatchers)
+}
+
+// ApplyIncludeMatches adds advanced filtering to journals.
+func ApplyIncludeMatches(j journal, m IncludeMatches) error {
+	for _, or := range m.OR {
+		if err := ApplyIncludeMatches(j, or); err != nil {
+			return err
+		}
+		if err := j.AddDisjunction(); err != nil {
+			return fmt.Errorf("error adding disjunction to journal: %v", err)
+		}
+	}
+
+	for _, and := range m.AND {
+		if err := ApplyIncludeMatches(j, and); err != nil {
+			return err
+		}
+		if err := j.AddConjunction(); err != nil {
+			return fmt.Errorf("error adding conjunction to journal: %v", err)
+		}
+	}
+
+	for _, match := range m.Matches {
+		if err := match.Apply(j); err != nil {
+			return fmt.Errorf("failed to apply %s expression: %+v", match.str, err)
+		}
+	}
+
+	return nil
+}
diff --git a/filebeat/input/journald/pkg/journalfield/matcher_test.go b/filebeat/input/journald/pkg/journalfield/matcher_test.go
index 82832f3297a7..e1e896a7bf01 100644
--- a/filebeat/input/journald/pkg/journalfield/matcher_test.go
+++ b/filebeat/input/journald/pkg/journalfield/matcher_test.go
@@ -24,6 +24,7 @@ import (
 	"testing"

 	"github.com/coreos/go-systemd/v22/sdjournal"
+	"github.com/stretchr/testify/require"
 )

 func TestApplyMatchersOr(t *testing.T) {
@@ -79,3 +80,25 @@ func TestApplyMatchersOr(t *testing.T) {
 		})
 	}
 }
+
+func TestApplySyslogIdentifier(t *testing.T) {
+	journal, err := sdjournal.NewJournal()
+	if err != nil {
+		t.Fatalf("error while creating test journal: %v", err)
+	}
+	defer journal.Close()
+
+	err = ApplySyslogIdentifierMatcher(journal, []string{"audit"})
+	require.NoError(t, err)
+}
+
+func TestApplyUnit(t *testing.T) {
+	journal, err := sdjournal.NewJournal()
+	if err != nil {
+		t.Fatalf("error while creating test journal: %v", err)
+	}
+	defer journal.Close()
+
+	err = ApplyUnitMatchers(journal, []string{"docker.service"})
+	require.NoError(t, err)
+}
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index e65e6990c620..cabffe369c94 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -3038,10 +3038,21 @@ filebeat.inputs:
   #id: service-foo

   # You may wish to have separate inputs for each service. You can use
-  # include_matches to specify a list of filter expressions that are
+  # include_matches.or to specify a list of filter expressions that are
   # applied as a logical OR. You may specify filter
-  #include_matches:
-  #- _SYSTEMD_UNIT=foo.service
+  #include_matches.or:
+  #- match:
+  #  - _SYSTEMD_UNIT=foo.service
+
+  # List of syslog identifiers
+  #syslog_identifiers: ["audit"]
+
+  # Collect events from the service and messages about the service,
+  # including coredumps.
+  #units: ["docker.service"]
+
+  # The list of transports (_TRANSPORT field of journald entries)
+  #transports: ["audit"]

   # Parsers are also supported, here is an example of the multiline
   # parser.
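As a quick, hypothetical illustration of where the filtering options introduced by this patch sit in a configuration file, here is a minimal `filebeat.yml` sketch. The unit, identifier, transport, and match values are placeholders taken from the tests and docs above, not recommendations; the exact semantics of each option are described in the documentation changes earlier in this diff.

["source","yaml"]
----
filebeat.inputs:
- type: journald
  id: service-filtering-example
  # Read only entries of these units (plus coredumps of the units and
  # messages about them from authorized daemons).
  units: ["docker.service"]

  # Read only entries with these syslog identifiers.
  syslog_identifiers: ["audit", "sudo"]

  # Read only entries received over these transports.
  transports: ["syslog", "journal"]

  # Structured matching: expressions in a `match` list follow journald
  # semantics (same field = OR, different fields = AND); blocks under
  # `or` are combined with a disjunction.
  include_matches.or:
  - match:
    - "journald.process.name=systemd"
  - match:
    - "message=Started Outputs some log lines."
----

In practice you would likely configure only the option you need for a given input; the sketch simply shows the shape of each setting in one place.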