Outcold Solutions LLC

Collector configuration file for collectorfordocker

Configuration file collector.conf is mapped to the /config/collector.conf inside the image. You can download the configuration file, override settings and map it to the same location.

Download

collector.conf

curl

$ curl -O https://www.outcoldsolutions.com/docs/monitoring-docker/configuration/collector.conf

wget

$ wget https://www.outcoldsolutions.com/docs/monitoring-docker/configuration/collector.conf

collector.conf

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
# collector configuration file
#
# Run collector with flag -conf and specify location of the configuration file.
#
# You can override all the values using environment variables with the format like
#   COLLECTOR__<ANYNAME>=<section>__<key>=<value>
# As an example you can set dataPath in [general] section as
#   COLLECTOR__DATAPATH=general__dataPath=C:\\some\\path\\data.db
# This parameter can be configured using -env-override, set it to empty string to disable this feature

[general]

# Please review the EULA at https://www.outcoldsolutions.com/docs/license-agreement/
# and accept the EULA by uncommenting the line below and changing the value to *true*
; acceptEULA = false

# location for the database
# is used to store position of the files and internal state
; dataPath = ./data/

# log level (trace, debug, info, warn, error, fatal)
; logLevel = info

# http server gives access to two endpoints
# /healthz
# /metrics
; httpServerBinding = :8080

# telemetry report endpoint, set it to empty string to disable telemetry
; telemetryEndpoint = https://license.outcold.solutions/telemetry/

# license check endpoint
; licenseEndpoint = https://license.outcold.solutions/license/

# license server through proxy
; licenseServerProxyUrl =

# license
; license =

# docker daemon hostname is used by default as hostname
# use this configuration to override
; hostname =

# Include custom fields to attach to every event. In the example below, every event sent to Splunk will have
# indexed field my_environment=dev. Field names should match ^[a-z][_a-z0-9]*$
# Better way to configure that is to specify labels for Docker Hosts.
# ; fields.my_environment = dev


# connection to docker host
[general.docker]

# url for docker API, only unix socket is supported
; url = unix:///var/run/docker.sock

# path to docker root folder (can fallback to use folder structure to read docker metadata)
; dockerRootFolder =

# In case the pod metadata was not retrieved, how often the collector should retry to reload the pod metadata
; MetadataFetchRetry = 5s

# In case an event is recent, how long the pipeline should wait for the metadata to be available in the Kubernetes API
; MetadataFetchWait = 30s

# In case the collector does not see new events for a specific container, and with the last metadata refresh
# we have not found this container — for how long we should keep this metadata in cache.
; MetadataTTL = 5m


# cgroup input
[input.system_stats]

# disable system level stats
; disabled = false

# cgroups fs location
; pathCgroups = /rootfs/sys/fs/cgroup

# proc location
; pathProc = /rootfs/proc

# how often to collect cgroup stats
; statsInterval = 30s

# override type
; type = docker_stats

# specify Splunk index
; index =


# proc input
[input.proc_stats]

# disable proc level stats
; disabled = false

# proc location
; pathProc = /rootfs/proc

# how often to collect proc stats
; statsInterval = 30s

# override type
; type = docker_proc_stats

# specify Splunk index
; index =


# Log files
[input.files]

# disable container logs monitoring
; disabled = false

# root location of docker files
; path = /var/lib/docker/containers/

# glob matching pattern for log files
; glob = */*-json.log*

# files are read using polling schema, when reach the EOF how often to check if files got updated
; pollingInterval = 250ms

# how often to look for the new files under logs path
; walkingInterval = 5s

# include verbose fields in events (file offset)
; verboseFields = false

# override type
; type = docker_logs

# specify Splunk index
; index =


# Input syslog(.\d+)? files
[input.files::syslog]

# disable host level logs
; disabled = false

# root location of docker files
path = /rootfs/var/log/

# regex matching pattern
match = ^(syslog|messages)(.\d+)?$

# limit search only on one level
recursive = false

# files are read using polling schema, when reach the EOF how often to check if files got updated
pollingInterval = 250ms

# how often to look for the new files under logs path
walkingInterval = 5s

# include verbose fields in events (file offset)
verboseFields = false

# override type
type = docker_host_logs

# specify Splunk index
; index =

# field extraction
extraction = ^(?P<timestamp>[A-Za-z]+\s+\d+\s\d+:\d+:\d+)\s(?P<syslog_hostname>[^\s]+)\s(?P<syslog_component>[^:\[]+)(\[(?P<syslog_pid>\d+)\])?: (.+)$

# timestamp field
timestampField = timestamp

# format for timestamp
# the layout defines the format by showing how the reference time, defined to be `Mon Jan 2 15:04:05 -0700 MST 2006`
timestampFormat = Jan 2 15:04:05

# Adjust date, if month/day aren't set in format
; timestampSetMonth = false
; timestampSetDay = false

# timestamp location (if not defined by format)
timestampLocation = Local


# Input all *.log(.\d+)? files
[input.files::logs]

# disable host level logs
; disabled = false

# root location of docker files
path = /rootfs/var/log/

# regex matching pattern
match = ^[\w]+\.log(.\d+)?$

# files are read using polling schema, when reach the EOF how often to check if files got updated
pollingInterval = 250ms

# how often to look for the new files under logs path
walkingInterval = 5s

# include verbose fields in events (file offset)
verboseFields = false

# override type
type = docker_host_logs

# specify Splunk index
; index =

# field extraction
; extraction =

# timestamp field
; timestampField =

# format for timestamp
# the layout defines the format by showing how the reference time, defined to be `Mon Jan 2 15:04:05 -0700 MST 2006`
; timestampFormat =

# timestamp location (if not defined by format)
; timestampLocation =


# Docker input (events)
[input.docker_events]

# disable docker events
; disabled = false

# interval to poll for new events in docker
; eventsPollingInterval = 5s

# override type
; type = docker_events

# specify Splunk index
; index =


# Splunk output
[output.splunk]

# Splunk HTTP Event Collector url
; url =

# Splunk HTTP Event Collector Token
; token =

# Allow invalid SSL server certificate
; insecure = false

# Path to CA certificate
; caPath =

# CA Name to verify
; caName =

# Events are batched with the maximum size set by batchSize and staying in pipeline for not longer
# than set by frequency
; frequency = 5s
; batchSize = 768K

# Splunk through proxy
; proxyUrl =

# Splunk acknowledgement url (.../services/collector/ack)
; ackUrl =

# Enable index acknowledgment
; ackEnabled = false

# Index acknowledgment timeout
; ackTimeout = 3m


# Pipe to join events (container logs only)
[pipe.join]

# disable joining events
; disabled = false

# Maximum interval of messages in pipeline
; maxInterval = 100ms

# Maximum time to wait for the messages in pipeline
; maxWait = 1s

# Maximum message size
; maxSize = 100K

# Default pattern to indicate new message (should start not from space)
; patternRegex = ^[^\s]


# Define special event join patterns for matched events
# Section consist of [pipe.join::<name>]
# [pipe.join::my_app]
## Set match pattern for the fields
#; matchRegex.docker_container_image = my_app
#; matchRegex.docker_stream = stdout
## All events start from '[<digits>'
#; patternRegex = ^\[\d+

About Outcold Solutions

Outcold Solutions provides solutions for monitoring Kubernetes, OpenShift and Docker clusters in Splunk Enterprise and Splunk Cloud. We offer certified Splunk applications, which gives you insights across all containers environments. We are helping businesses reduce complexity related to logging and monitoring by providing easy-to-use and deploy solutions for Linux and Windows containers. We deliver applications, which helps developers monitor their applications and operators to keep their clusters healthy. With the power of Splunk Enterprise and Splunk Cloud, we offer one solution to help you keep all the metrics and logs in one place, allowing you to quickly address complex questions on container performance.