Collectord configuration

Download

collectorforopenshift-rhel-syslog.yaml

CURL

curl -O https://www.outcoldsolutions.com/docs/syslog-kubernetes/collectorforopenshift-rhel-syslog.yaml

WGET

wget https://www.outcoldsolutions.com/docs/syslog-kubernetes/collectorforopenshift-rhel-syslog.yaml
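
After downloading, accept the license and set the syslog destination in the ConfigMap (see the [general] and [output.syslog] sections in the manifest below), then apply the manifest. A typical apply step, assuming the oc CLI is logged into your cluster:

oc apply -f collectorforopenshift-rhel-syslog.yaml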

collectorforopenshift-rhel-syslog.yaml

apiVersion: project.openshift.io/v1
kind: Project
metadata:
  labels:
    app: collectorforopenshift-syslog
  name: collectorforopenshift-syslog
  annotations:
    openshift.io/node-selector: ''
    openshift.io/description: 'Forwarding logs to Syslog, built by Outcold Solutions'
    openshift.io/display-name: 'Collectord for OpenShift (Syslog)'
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: configurations.collectord.io
spec:
  group: collectord.io
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  scope: Cluster
  names:
    plural: configurations
    singular: configuration
    kind: Configuration
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: collectorforopenshift-syslog-critical
value: 1000000000
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: collectorforopenshift-syslog
  name: collectorforopenshift-syslog
  namespace: collectorforopenshift-syslog
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: collectorforopenshift-syslog
  name: collectorforopenshift-syslog
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: collectorforopenshift-syslog
  name: collectorforopenshift-syslog
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collectorforopenshift-syslog
subjects:
  - kind: ServiceAccount
    name: collectorforopenshift-syslog
    namespace: collectorforopenshift-syslog
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: collectorforopenshift-syslog
  namespace: collectorforopenshift-syslog
  labels:
    app: collectorforopenshift-syslog
data:
  001-general.conf: |
    # The general configuration is used for all deployments
    #
    # Run collectord with the flag `-conf` and specify the location of the configuration files.
    #
    # You can override all the values using environment variables with a format like
    #   COLLECTOR__<ANYNAME>=<section>__<key>=<value>
    # As an example, you can set `dataPath` in the `[general]` section as
    #   COLLECTOR__DATAPATH=general__dataPath=C:\\some\\path\\data.db
    # This behavior can be configured with -env-override; set it to an empty string to disable this feature
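    # For example (illustrative value; the <ANYNAME> suffix is arbitrary), the syslog
    # destination defined below could be set without editing this file:
    #   COLLECTOR__SYSLOGADDRESS=output.syslog__address=syslog.example.com:514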

    [general]

    # Review the License https://www.outcoldsolutions.com/docs/license-agreement/
    # and accept the License by changing the value to *true*
    acceptLicense = false

    # Location of the database
    # Collectord stores file positions and its internal state here
    dataPath = ./data/

    # log level (accepted values are trace, debug, info, warn, error, fatal)
    logLevel = info

    # http server gives access to two endpoints
    # /healthz
    # /metrics
    httpServerBinding =

    # telemetry report endpoint, set it to an empty string to disable telemetry
    telemetryEndpoint = https://license.outcold.solutions/telemetry/

    # license check endpoint
    licenseEndpoint = https://license.outcold.solutions/license/

    # license server through proxy
    licenseServerProxyUrl =

    # authentication with basic authorization (user:password)
    licenseServerProxyBasicAuth =

    # license key
    license =

    # The environment variable $KUBERNETES_NODENAME is used by default to set up the hostname
    # Use the value below to override it with a specific name
    hostname =

    # Default output for events, logs and metrics
    # valid values: syslog and devnull
    # Use devnull as the default if you don't want to forward data
    defaultOutput = syslog

    # Default buffer size for the file input
    fileInputBufferSize = 256b

    # Maximum size of one line the file reader can read
    fileInputLineMaxSize = 1mb

    # Include custom fields to attach to every event; in the example below, every event sent to Syslog will have
    # the indexed field my_environment=dev. Field names should match ^[a-z][_a-z0-9]*$
    # A better way to configure this is to specify labels for OpenShift Nodes.
    # ; fields.my_environment = dev
    # Identify the cluster if you are planning to monitor multiple clusters
    fields.cluster = -

    # Include EC2 metadata (see the list of possible fields at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
    # Should be in the format ec2Metadata.{desired_field_name} = {url path to read the value}
    # ec2Metadata.ec2_instance_id = /latest/meta-data/instance-id
    # ec2Metadata.ec2_instance_type = /latest/meta-data/instance-type

    # subdomain for the annotations added to pods, workloads, namespaces or containers, like syslog.collectord.io/..
    annotationsSubdomain = syslog
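    # With annotationsSubdomain = syslog, collectord reads annotations with the
    # syslog.collectord.io/ prefix; an illustrative example, reusing the
    # strip-terminal-escape-sequences setting described in [input.files] below:
    #   syslog.collectord.io/strip-terminal-escape-sequences: 'true'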

    # Configure the acknowledgement database.
    # - force fsync on every write to the Write-Ahead-Log
    db.fsync = false
    # - maximum size of the Write-Ahead-Log
    db.compactAt = 1M

    # configure global thruput per second for forwarded logs (metrics are not included)
    # for example, if you set `thruputPerSecond = 512Kb`, that will limit the amount of logs forwarded
    # from a single Collectord instance to 512Kb per second.
    # You can configure thruput individually for the logs (including specific container logs) below
    thruputPerSecond =

    # Configure events that are too old to be forwarded; for example, 168h (7 days) will drop all events
    # older than 7 days
    tooOldEvents =

    # Configure events that are too new to be forwarded; for example, 1h will drop all events that are 1h in the future
    tooNewEvents =

    # connection to the kubernetes api
    [general.kubernetes]

    # The environment variable $KUBERNETES_NODENAME is used by default to set up nodeName
    # Use it only when you need to override it
    nodeName =

    # Configuration to access the API server,
    # see https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
    # for details
    tokenPath = /var/run/secrets/kubernetes.io/serviceaccount/token
    certPath = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

    # Default timeout for http responses. The streaming/watch requests depend on this timeout.
    timeout = 30m

    # (obsolete) In case pod metadata could not be retrieved, how often collectord should retry to reload the pod metadata
    # metadataFetchRetry = 5s

    # (obsolete) In case an event is recent, how long the pipeline should wait for the metadata to be available in the Kubernetes API
    # metadataFetchWait = 30s

    # How long to keep the cache for recent calls to the API server (to limit the number of calls when collectord discovers new pods)
    metadataTTL = 30s

    # regex to find pods
    podsCgroupFilter = ^/([^/\s]+/)*kubepods(\.slice)?/((kubepods-)?(burstable|besteffort)(\.slice)?/)?([^/]*)pod([0-9a-f]{32}|[0-9a-f\-_]{36})(\.slice)?$

    # regex to find containers in the pods
    containersCgroupFilter = ^/([^/\s]+/)*kubepods(\.slice)?/((kubepods-)?(burstable|besteffort)(\.slice)?/)?([^/]*)pod([0-9a-f]{32}|[0-9a-f\-_]{36})(\.slice)?/(docker-|crio-)?[0-9a-f]{64}(\.scope)?(\/.+)?$

    # path to the kubelet root location (use it to discover application logs for emptyDir)
    # the expected format is `pods/{pod-id}/volumes/kubernetes.io~empty-dir/{volume-name}/_data/`
    volumesRootDir = /rootfs/var/lib/kubelet/
    # You can attach annotations as metadata, using the format
    #   includeAnnotations.{key} = {regexp}
    # For example, if you want to include all annotations that start with `prometheus.io` or `example.com`, use
    # the following format:
    #   includeAnnotations.1 = ^prometheus\.io.*
    #   includeAnnotations.2 = ^example\.com.*

    # watch for changes (annotations) in the objects
    watch.namespaces = v1/namespace
    watch.deploymentconfigs = apis/v1/apps.openshift.io/deploymentconfig
    watch.configurations = apis/v1/collectord.io/configuration

    # Syslog output
    [output.syslog]

    # tcp or udp
    network = tcp
    # syslog destination
    address =
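    # For example (illustrative destination; replace with your own syslog receiver):
    #   network = tcp
    #   address = syslog.example.com:514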

  002-daemonset.conf: |
    # The DaemonSet configuration is used for both nodes and masters.

    # Connection to the docker host
    [general.docker]

    # url for the docker API, only unix sockets are supported
    url = unix:///rootfs/var/run/docker.sock

    # path to the docker root folder (can fall back to using the folder structure to read docker metadata)
    dockerRootFolder = /rootfs/var/lib/docker/

    # (obsolete) In case pod metadata could not be retrieved, how often collectord should retry to reload the pod metadata
    # metadataFetchRetry = 5s

    # (obsolete) In case an event is recent, how long the pipeline should wait for the metadata to be available in the Kubernetes API
    # metadataFetchWait = 30s

    # (obsolete) In case collectord does not see new events for a specific container, and the last metadata refresh
    # did not find this container - for how long we should keep its metadata in the cache.
    # metadataTTL = 5s

    # Timeout for http responses to the docker client. The streaming requests depend on this timeout.
    timeout = 1m

    # in case of Kubernetes/OpenShift, if you schedule some containers with Docker directly, but not through Kubernetes,
    # this filter allows us to find them (by default, finding all containers with names not starting with k8s_)
    containersNameFilter = ^(([^k])|(k[^8])|(k8[^s])|(k8s[^_])).*$

    # regex to find docker container cgroups (helps excluding other cgroups with a matched ID)
    containersCgroupFilter = ^(/([^/\s]+/)*(docker-|docker/)[0-9a-f]{64}(\.scope)?)$


    # Connection to CRI-O
    [general.cri-o]

    # url for the CRI-O API, only unix sockets are supported
    url = unix:///rootfs/var/run/crio/crio.sock

    # Timeout for http responses to the CRI-O client. The streaming requests depend on this timeout.
    timeout = 1m


    # Container Log files
    [input.files]

    # disable container logs monitoring
    disabled = false

    # root location of docker log files
    # logs are expected in the standard docker format like {containerID}/{containerID}-json.log
    # rotated files
    path = /rootfs/var/lib/docker/containers/
    # root location of CRI-O files
    # logs are expected in the Kubernetes format, like {podID}/{containerName}/0.log
    crioPath = /rootfs/var/log/pods/

    # (obsolete) glob matching pattern for log files
    # glob = */*-json.log*

    # files are read using a polling scheme; when EOF is reached, how often to check if files got updated
    pollingInterval = 250ms

    # how often to look for new files under the logs path
    walkingInterval = 5s

    # include verbose fields in events (file offset)
    verboseFields = false

    # docker splits events when they are larger than 10-100k (depending on the docker version)
    # we join them together by default and forward to syslog as one event
    joinPartialEvents = true

    # In case your containers report messages with terminal colors or other escape sequences,
    # you can enable stripping for all the containers in one place.
    # It is better to enable it only for the containers that require it, with the label collectord.io/strip-terminal-escape-sequences=true
    stripTerminalEscapeSequences = false
    # Regexp used for stripping terminal colors; it does not strip all the escape sequences
    # Read http://man7.org/linux/man-pages/man4/console_codes.4.html for more information
    stripTerminalEscapeSequencesRegex = (\x1b\[\d{1,3}(;\d{1,3})*m)|(\x07)|(\x1b]\d+(\s\d)?;[^\x07]+\x07)|(.*\x1b\[K)

    # set output (syslog or devnull, default is [general]defaultOutput)
    output =

    # configure default thruput per second for each container log
    # for example, if you set `thruputPerSecond = 128Kb`, that will limit the amount of logs forwarded
    # from a single container to 128Kb per second.
    thruputPerSecond =

    # Configure events that are too old to be forwarded; for example, 168h (7 days) will drop all events
    # older than 7 days
    tooOldEvents =

    # Configure events that are too new to be forwarded; for example, 1h will drop all events that are 1h in the future
    tooNewEvents =

    # Syslog format
    type = k8s_logs
    syslog.format = {type}|{timestamp::format(2006-01-02T15:04:05.999999999Z07:00)}|{cluster}|{host}|{namespace}|{pod_id}|{pod_name}|{container_name}|{stream}|{message}
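    # With the format above, a forwarded container log event looks like
    # (illustrative values):
    #   k8s_logs|2019-05-21T14:03:05.123456789Z|-|node1|default|<pod-uid>|my-pod|my-container|stdout|Hello world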


    # Application Logs
    [input.app_logs]

    # disable container application logs monitoring
    disabled = false

    # root location of mounts (applies to hostPath mounts only), if the hostPath differs inside the container from the path on the host
    root = /rootfs/

    # how often to review the list of available volumes
    syncInterval = 5s

    # glob matching pattern for log files
    glob = *.log*

    # files are read using a polling scheme; when EOF is reached, how often to check if files got updated
    pollingInterval = 250ms

    # how often to look for new files under the logs path
    walkingInterval = 5s

    # include verbose fields in events (file offset)
    verboseFields = false

    # files are split using the newline character; with this configuration you can specify what defines a new event
    # after a newline
    eventPatternRegex = ^[^\s]
    # Maximum interval of messages in the pipeline
    eventPatternMaxInterval = 100ms
    # Maximum time to wait for the messages in the pipeline
    eventPatternMaxWait = 1s
    # Maximum message size
    eventPatternMaxSize = 1MB
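    # With the defaults above, a line that starts with whitespace (a Java stack
    # trace frame, for example) is treated as a continuation of the previous
    # event rather than the start of a new one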

    # set output (syslog or devnull, default is [general]defaultOutput)
    output =

    # configure default thruput per second for each container log
    # for example, if you set `thruputPerSecond = 128Kb`, that will limit the amount of logs forwarded
    # from a single container to 128Kb per second.
    thruputPerSecond =

    # Configure events that are too old to be forwarded; for example, 168h (7 days) will drop all events
    # older than 7 days
    tooOldEvents =

    # Configure events that are too new to be forwarded; for example, 1h will drop all events that are 1h in the future
    tooNewEvents =

    # Syslog format
    type = k8s_logs
    syslog.format = {type}|{timestamp::format(2006-01-02T15:04:05.999999999Z07:00)}|{cluster}|{host}|{namespace}|{pod_id}|{pod_name}|{container_name}|{file_name}|{message}


    [input.journald]

    # disable host level logs
    disabled = false

    # root location of log files
    path.persistent = /rootfs/var/log/journal/
    path.volatile = /rootfs/run/log/journal/

    # when the end of the journal is reached, how often to poll
    pollingInterval = 250ms

    # sample output (-1 does not sample, 20 - only 20% of the logs will be forwarded)
    samplingPercent = -1

    # sampling key (should be a regexp with the named match pattern `key`)
    samplingKey =
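    # An illustrative example (assumes Go-style named groups), sampling on the
    # first word of the message:
    #   samplingPercent = 20
    #   samplingKey = ^(?P<key>\S+)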

    # how often to reopen the journal to free old files
    reopenInterval = 1h

    # set output (syslog or devnull, default is [general]defaultOutput)
    output =

    # configure default thruput per second for this group of files
    # for example, if you set `thruputPerSecond = 128Kb`, that will limit the amount of logs forwarded
    # from the files in this group to 128Kb per second.
    thruputPerSecond =

    # Configure events that are too old to be forwarded; for example, 168h (7 days) will drop all events
    # older than 7 days
    tooOldEvents =

    # Configure events that are too new to be forwarded; for example, 1h will drop all events that are 1h in the future
    tooNewEvents =

    # syslog format
    type = k8s_host_logs
    syslog.format = {type}|{timestamp::format(2006-01-02T15:04:05.999999999Z07:00)}|{cluster}|{host}|journald|{message}


    # Pipe to join events (container logs only)
    [pipe.join]

    # disable joining events
    disabled = false

    # Maximum interval of messages in the pipeline
    maxInterval = 100ms

    # Maximum time to wait for the messages in the pipeline
    maxWait = 1s

    # Maximum message size
    maxSize = 1MB

    # Default pattern to indicate a new message (should not start with a space)
    patternRegex = ^[^\s]
  003-daemonset-master.conf: |

    # Audit logs
    [input.files::audit-logs]

    # disable audit logs monitoring
    disabled = false

    # root location for audit logs
    path = /rootfs/var/log/kube-apiserver/

    # glob matching files
    glob = audit*.log

    # files are read using a polling scheme; when EOF is reached, how often to check if files got updated
    pollingInterval = 250ms

    # how often to look for new files under the logs path
    walkingInterval = 5s

    # include verbose fields in events (file offset)
    verboseFields = false

    # field extraction
    extraction =
    # extractionMessageField =

    # timestamp field
    timestampField =

    # format for the timestamp
    # the layout defines the format by showing how the reference time, defined to be `Mon Jan 2 15:04:05 -0700 MST 2006`, would be represented
    timestampFormat =

    # timestamp location (if not defined by the format)
    timestampLocation =
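    # For example (illustrative), RFC3339 timestamps would be described as:
    #   timestampFormat = 2006-01-02T15:04:05Z07:00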

    # set output (syslog or devnull, default is [general]defaultOutput)
    output =

    # configure default thruput per second for this group of files
    # for example, if you set `thruputPerSecond = 128Kb`, that will limit the amount of logs forwarded
    # from the files in this group to 128Kb per second.
    thruputPerSecond =

    # Configure events that are too old to be forwarded; for example, 168h (7 days) will drop all events
    # older than 7 days
    tooOldEvents =

    # Configure events that are too new to be forwarded; for example, 1h will drop all events that are 1h in the future
    tooNewEvents =

    type = k8s_audit_logs
    syslog.format = {type}|{timestamp::format(2006-01-02T15:04:05.999999999Z07:00)}|{cluster}|{message}


  004-addon.conf: |
    [general]
    # addons can run in parallel with the agents
    addon = true


    [input.kubernetes_events]

    # disable collecting kubernetes events
    disabled = false

    # (obsolete, depends on the kubernetes timeout)
    # Set the timeout for how long the request to watch events will hang reading.
    # eventsWatchTimeout = 30m

    # (obsolete, depends on the kubernetes timeout)
    # Ignore events last seen later than this duration.
    # eventsTTL = 12h

    # set output (syslog or devnull, default is [general]defaultOutput)
    output =

    # syslog format
    type = k8s_events
    syslog.format = {type}|{timestamp::format(2006-01-02T15:04:05.999999999Z07:00)}|{cluster}|{message}

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: collectorforopenshift-syslog
  namespace: collectorforopenshift-syslog
  labels:
    app: collectorforopenshift-syslog
spec:
  # Use the RollingUpdate strategy so that the pods restart automatically
  # when you update the configuration
  updateStrategy:
    type: RollingUpdate

  selector:
    matchLabels:
      daemon: collectorforopenshift-syslog

  template:
    metadata:
      name: collectorforopenshift-syslog
      labels:
        daemon: collectorforopenshift-syslog
    spec:
      priorityClassName: collectorforopenshift-syslog-critical
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: collectorforopenshift-syslog
      # We run this DaemonSet only on non-master nodes
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: DoesNotExist
      tolerations:
      - operator: "Exists"
        effect: "NoSchedule"
      - operator: "Exists"
        effect: "NoExecute"
      containers:
      - name: collectorforopenshift-syslog
        # Stick to a specific version
        image: registry.connect.redhat.com/outcoldsolutions/collectorforopenshift:25.10.3
        securityContext:
          privileged: true
          runAsUser: 0
        # Define your resources if you need to. The defaults should be fine for most clusters.
        resources:
          limits:
            cpu: 2
            memory: 512Mi
          requests:
            cpu: 200m
            memory: 192Mi
        env:
        - name: KUBERNETES_NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        volumeMounts:
        # We store state (file positions) in the /data folder
        - name: collectorforopenshift-syslog-state
          mountPath: /data
        # Configuration file deployed with the ConfigMap
        - name: collectorforopenshift-syslog-config
          mountPath: /config/
          readOnly: true
        # Cgroup filesystem to get metrics
        - name: cgroup
          mountPath: /rootfs/sys/fs/cgroup
          readOnly: true
        # Proc filesystem to get metrics
        - name: proc
          mountPath: /rootfs/proc
          readOnly: true
        # Location of the docker root (for container logs and metadata)
        - name: docker-root
          mountPath: /rootfs/var/lib/docker/
          readOnly: true
          mountPropagation: HostToContainer
        # Docker socket
        - name: docker-unix-socket
          mountPath: /rootfs/var/run/docker.sock
          readOnly: true
        # CRI-O socket (if using the CRI-O runtime)
        - name: crio-unix-socket
          mountPath: /rootfs/var/run/crio/
          readOnly: true
        # Host logs location (including CRI-O logs)
        - name: logs
          mountPath: /rootfs/var/log/
          readOnly: true
        - name: run-logs
          mountPath: /rootfs/run/log/
          readOnly: true
        # Application logs
        - name: volumes-root
          mountPath: /rootfs/var/lib/kubelet/
          readOnly: true
          mountPropagation: HostToContainer
        # correct timezone
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
      volumes:
      # We store state directly on the host; change this location if
      # your persistent volume is somewhere else
      - name: collectorforopenshift-syslog-state
        hostPath:
          path: /var/lib/collectorforopenshift-syslog/data/
      # Location of the docker root (for container logs and metadata)
      - name: docker-root
        hostPath:
          path: /var/lib/docker/
      # Location of the cgroup file system
      - name: cgroup
        hostPath:
          path: /sys/fs/cgroup
      # Location of the proc file system
      - name: proc
        hostPath:
          path: /proc
      # Host logs location (including CRI-O logs)
      - name: logs
        hostPath:
          path: /var/log
      - name: run-logs
        hostPath:
          path: /run/log
      # Docker socket
      - name: docker-unix-socket
        hostPath:
          path: /var/run/docker.sock
      # CRI-O socket (if using the CRI-O runtime)
      - name: crio-unix-socket
        hostPath:
          path: /var/run/crio/
      # Location of the original mounts, to autodiscover application logs
      - name: volumes-root
        hostPath:
          path: /var/lib/kubelet/
      # correct timezone
      - name: localtime
        hostPath:
          path: /etc/localtime
      # configuration from the ConfigMap
      - name: collectorforopenshift-syslog-config
        configMap:
          name: collectorforopenshift-syslog
          items:
          - key: 001-general.conf
            path: 001-general.conf
          - key: 002-daemonset.conf
            path: 002-daemonset.conf
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: collectorforopenshift-syslog-master
  namespace: collectorforopenshift-syslog
  labels:
    app: collectorforopenshift-syslog
spec:
  updateStrategy:
    type: RollingUpdate

  selector:
    matchLabels:
      daemon: collectorforopenshift-syslog

  template:
    metadata:
      name: collectorforopenshift-syslog-master
      labels:
        daemon: collectorforopenshift-syslog
    spec:
      priorityClassName: collectorforopenshift-syslog-critical
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: collectorforopenshift-syslog
      # Deploy only on master nodes
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: Exists
      tolerations:
      - operator: "Exists"
        effect: "NoSchedule"
      - operator: "Exists"
        effect: "NoExecute"
      containers:
      - name: collectorforopenshift-syslog
        image: registry.connect.redhat.com/outcoldsolutions/collectorforopenshift:25.10.3
        securityContext:
          privileged: true
          runAsUser: 0
        resources:
          limits:
            cpu: 1
            memory: 512Mi
          requests:
            cpu: 200m
            memory: 192Mi
        env:
        - name: KUBERNETES_NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        volumeMounts:
        - name: collectorforopenshift-syslog-state
          mountPath: /data
        - name: collectorforopenshift-syslog-config
          mountPath: /config/
          readOnly: true
        - name: cgroup
          mountPath: /rootfs/sys/fs/cgroup
          readOnly: true
        - name: proc
          mountPath: /rootfs/proc
          readOnly: true
        - name: docker-logs
          mountPath: /rootfs/var/lib/docker/
          readOnly: true
          mountPropagation: HostToContainer
        - name: docker-unix-socket
          mountPath: /rootfs/var/run/docker.sock
          readOnly: true
        - name: crio-unix-socket
          mountPath: /rootfs/var/run/crio/
          readOnly: true
        - name: logs
          mountPath: /rootfs/var/log/
          readOnly: true
        - name: run-logs
          mountPath: /rootfs/run/log/
          readOnly: true
        - name: etcd-certs
          mountPath: /rootfs/etc/kubernetes/static-pod-resources/etcd-member/
          readOnly: true
        - name: volumes-root
          mountPath: /rootfs/var/lib/kubelet/
          readOnly: true
          mountPropagation: HostToContainer
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
      volumes:
      - name: collectorforopenshift-syslog-state
        hostPath:
          path: /var/lib/collectorforopenshift-syslog/data/
      - name: docker-logs
        hostPath:
          path: /var/lib/docker/
      - name: cgroup
        hostPath:
          path: /sys/fs/cgroup
      - name: proc
        hostPath:
          path: /proc
      - name: logs
        hostPath:
          path: /var/log
      - name: run-logs
        hostPath:
          path: /run/log
      - name: docker-unix-socket
        hostPath:
          path: /var/run/docker.sock
      - name: crio-unix-socket
        hostPath:
          path: /var/run/crio/
      - name: etcd-certs
        hostPath:
          path: /etc/kubernetes/static-pod-resources/etcd-member/
      - name: volumes-root
        hostPath:
          path: /var/lib/kubelet/
      - name: localtime
        hostPath:
          path: /etc/localtime
      - name: collectorforopenshift-syslog-config
        configMap:
          name: collectorforopenshift-syslog
          items:
          - key: 001-general.conf
            path: 001-general.conf
          - key: 002-daemonset.conf
            path: 002-daemonset.conf
          - key: 003-daemonset-master.conf
            path: 003-daemonset-master.conf
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: collectorforopenshift-syslog-addon
  namespace: collectorforopenshift-syslog
  labels:
    app: collectorforopenshift-syslog
spec:
  replicas: 1
  selector:
    matchLabels:
      daemon: collectorforopenshift-syslog
  template:
    metadata:
      name: collectorforopenshift-syslog-addon
      labels:
        daemon: collectorforopenshift-syslog
    spec:
      priorityClassName: collectorforopenshift-syslog-critical
      serviceAccountName: collectorforopenshift-syslog
      containers:
      - name: collectorforopenshift-syslog
        image: registry.connect.redhat.com/outcoldsolutions/collectorforopenshift:25.10.3
        securityContext:
          privileged: true
          runAsUser: 0
        resources:
          limits:
            cpu: 500m
            memory: 256Mi
          requests:
            cpu: 50m
            memory: 64Mi
        env:
        - name: KUBERNETES_NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        volumeMounts:
        - name: collectorforopenshift-syslog-state
          mountPath: /data
        - name: collectorforopenshift-syslog-config
          mountPath: /config/
          readOnly: true
      volumes:
      - name: collectorforopenshift-syslog-state
        hostPath:
          path: /var/lib/collectorforopenshift-syslog/data/
      - name: collectorforopenshift-syslog-config
        configMap:
          name: collectorforopenshift-syslog
          items:
          - key: 001-general.conf
            path: 001-general.conf
          - key: 004-addon.conf
            path: 004-addon.conf
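
After applying the manifest, verify that the pods are running and review collectord's own output (standard oc commands, shown as a sketch):

oc get pods -n collectorforopenshift-syslog
oc logs -n collectorforopenshift-syslog ds/collectorforopenshift-syslog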

About Outcold Solutions

Outcold Solutions provides solutions for monitoring Kubernetes, OpenShift and Docker clusters in Splunk Enterprise and Splunk Cloud. We offer certified Splunk applications that give you insights across all container environments. We help businesses reduce the complexity of logging and monitoring by providing easy-to-use and easy-to-deploy solutions for Linux and Windows containers. We deliver applications that help developers monitor their applications and help operators keep their clusters healthy. With the power of Splunk Enterprise and Splunk Cloud, we offer one solution that keeps all the metrics and logs in one place, allowing you to quickly address complex questions on container performance.
