logging.banzaicloud.io_clusteroutputs.yaml
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: clusteroutputs.logging.banzaicloud.io
spec:
group: logging.banzaicloud.io
names:
kind: ClusterOutput
listKind: ClusterOutputList
plural: clusteroutputs
singular: clusteroutput
scope: ""
validation:
openAPIV3Schema:
description: ClusterOutput is the Schema for the clusteroutputs API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
          description: ClusterOutputSpec contains Kubernetes spec for ClusterOutput
properties:
azurestorage:
properties:
auto_create_container:
description: 'Automatically create container if not exists(default:
true)'
type: boolean
azure_container:
description: Your azure storage container
type: string
azure_object_key_format:
description: 'Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension})'
type: string
azure_storage_access_key:
description: Your azure storage access key
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
type: object
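# Illustrative sketch, not part of the generated schema: secret-backed fields such as
# azure_storage_access_key accept a literal `value`, a `valueFrom.secretKeyRef` that the
# operator resolves into the rendered fluentd configuration, or a `mountFrom.secretKeyRef`
# that is mounted into the fluentd container. The Secret name and key below are assumptions.
#
#   azure_storage_access_key:
#     valueFrom:
#       secretKeyRef:
#         name: azure-credentials   # hypothetical Secret in the logging control namespace
#         key: storage-access-key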
azure_storage_account:
description: Your azure storage account
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
type: object
azure_storage_type:
description: 'Azure storage type currently only "blob" supported
(default: blob)'
type: string
buffer:
properties:
chunk_full_threshold:
description: The percentage of chunk size threshold for flushing.
output plugin will flush the chunk when actual size reaches
chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
default)
type: string
chunk_limit_records:
description: The max number of events that each chunks can store
in it
type: integer
chunk_limit_size:
description: 'The max size of each chunks: events will be written
into chunks until the size of chunks become this size'
type: string
compress:
description: If you set this option to gzip, you can get Fluentd
to compress data records before writing to buffer chunks.
type: string
delayed_commit_timeout:
description: The timeout seconds until output plugin decides
that async write operation fails
type: string
disable_chunk_backup:
description: Instead of storing unrecoverable chunks in the
backup directory, just discard them. This option is new in
Fluentd v1.2.6.
type: boolean
flush_at_shutdown:
description: The value to specify to flush/write all buffer
chunks at shutdown, or not
type: boolean
flush_interval:
description: 'Default: 60s'
type: string
flush_mode:
description: 'Default: default (equals to lazy if time is specified
as chunk key, interval otherwise) lazy: flush/write chunks
once per timekey interval: flush/write chunks per specified
time via flush_interval immediate: flush/write chunks immediately
after events are appended into chunks'
type: string
flush_thread_burst_interval:
description: The sleep interval seconds of threads between flushes
when output plugin flushes waiting chunks next to next
type: string
flush_thread_count:
description: The number of threads of output plugins, which
is used to write chunks in parallel
type: integer
flush_thread_interval:
description: The sleep interval seconds of threads to wait next
flush trial (when no chunks are waiting)
type: string
overflow_action:
description: 'How output plugin behaves when its buffer queue
is full throw_exception: raise exception to show this error
in log block: block processing of input plugin to emit events
into that buffer drop_oldest_chunk: drop/purge oldest chunk
to accept newly incoming chunk'
type: string
path:
description: 'The path where buffer chunks are stored. The ''*''
is replaced with random characters. It''s highly recommended
to leave this default. (default: operator generated)'
type: string
queue_limit_length:
description: The queue length limitation of this buffer plugin
instance
type: integer
queued_chunks_limit_size:
description: Limit the number of queued chunks. If you set smaller
flush_interval, e.g. 1s, there are lots of small queued chunks
in buffer. This is not good with file buffer because it consumes
lots of fd resources when output destination has a problem.
This parameter mitigates such situations.
type: integer
retry_exponential_backoff_base:
description: The base number of exponential backoff for retries
type: string
retry_forever:
description: If true, plugin will ignore retry_timeout and retry_max_times
options and retry flushing forever
type: boolean
retry_max_interval:
description: The maximum interval seconds for exponential backoff
between retries while failing
type: string
retry_max_times:
description: The maximum number of times to retry to flush while
failing
type: integer
retry_randomize:
description: If true, output plugin will retry after randomized
interval not to do burst retries
type: boolean
retry_secondary_threshold:
description: The ratio of retry_timeout to switch to use secondary
while failing (Maximum valid value is 1.0)
type: string
retry_timeout:
description: The maximum seconds to retry to flush while failing,
until plugin discards buffer chunks
type: string
retry_type:
description: 'exponential_backoff: wait seconds will become
large exponentially per failures periodic: output plugin will
retry periodically with fixed intervals (configured via retry_wait)'
type: string
retry_wait:
description: Seconds to wait before next retry to flush, or
constant factor of exponential backoff
type: string
tags:
description: 'When tag is specified as buffer chunk key, output
plugin writes events into chunks separately per tags. (default:
tag,time)'
type: string
timekey:
description: Output plugin will flush chunks per specified time
(enabled when time is specified in chunk keys)
type: string
timekey_use_utc:
description: Output plugin decides to use UTC or not to format
placeholders using timekey
type: boolean
timekey_wait:
description: Output plugin writes chunks after timekey_wait
seconds later after timekey expiration
type: string
timekey_zone:
description: The timezone (-0700 or Asia/Tokyo) string for formatting
timekey placeholders
type: string
total_limit_size:
description: The size limitation of this buffer plugin instance.
Once the total size of stored buffer reached this threshold,
all append operations will fail with error (and data will
be lost)
type: string
type:
description: Fluentd core bundles memory and file plugins. 3rd
party plugins are also available when installed.
type: string
type: object
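# Illustrative buffer tuning sketch (comment only). The keys come from the buffer schema
# above; the values are assumptions, not defaults taken from this file.
#
#   buffer:
#     timekey: 1m
#     timekey_wait: 30s
#     timekey_use_utc: true
#     flush_thread_count: 2
#     retry_forever: true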
format:
description: 'Compat format type: out_file, json, ltsv (default:
out_file)'
type: string
path:
description: Path prefix of the files on Azure
type: string
store_as:
description: 'Store as: gzip, json, text, lzo, lzma2 (default: gzip)'
type: string
required:
- azure_container
- azure_storage_access_key
- azure_storage_account
type: object
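# Illustrative ClusterOutput using the azurestorage plugin described above (sketch only;
# the resource name, namespace and Secret references are assumptions, not values from
# this file). ClusterOutputs are typically created in the logging control namespace.
#
#   apiVersion: logging.banzaicloud.io/v1beta1
#   kind: ClusterOutput
#   metadata:
#     name: azure-output
#     namespace: logging
#   spec:
#     azurestorage:
#       azure_container: cluster-logs
#       azure_storage_account:
#         valueFrom:
#           secretKeyRef:
#             name: azure-credentials
#             key: account-name
#       azure_storage_access_key:
#         valueFrom:
#           secretKeyRef:
#             name: azure-credentials
#             key: access-key
#       path: logs/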
cloudwatch:
properties:
auto_create_stream:
description: ' Create log group and stream automatically. (default:
false)'
type: boolean
aws_instance_profile_credentials_retries:
description: 'Instance Profile Credentials call retries (default:
nil)'
type: integer
aws_key_id:
description: AWS access key id
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
type: object
aws_sec_key:
description: AWS secret key.
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
type: object
aws_sts_role_arn:
description: The role ARN to assume when using cross-account sts
authentication
type: string
aws_sts_session_name:
description: 'The session name to use with sts authentication (default:
''fluentd'')'
type: string
aws_use_sts:
description: Enable AssumeRoleCredentials to authenticate, rather
than the default credential hierarchy. See 'Cross-Account Operation'
below for more detail.
type: boolean
buffer:
properties:
chunk_full_threshold:
description: The percentage of chunk size threshold for flushing.
output plugin will flush the chunk when actual size reaches
chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
default)
type: string
chunk_limit_records:
description: The max number of events that each chunks can store
in it
type: integer
chunk_limit_size:
description: 'The max size of each chunks: events will be written
into chunks until the size of chunks become this size'
type: string
compress:
description: If you set this option to gzip, you can get Fluentd
to compress data records before writing to buffer chunks.
type: string
delayed_commit_timeout:
description: The timeout seconds until output plugin decides
that async write operation fails
type: string
disable_chunk_backup:
description: Instead of storing unrecoverable chunks in the
backup directory, just discard them. This option is new in
Fluentd v1.2.6.
type: boolean
flush_at_shutdown:
description: The value to specify to flush/write all buffer
chunks at shutdown, or not
type: boolean
flush_interval:
description: 'Default: 60s'
type: string
flush_mode:
description: 'Default: default (equals to lazy if time is specified
as chunk key, interval otherwise) lazy: flush/write chunks
once per timekey interval: flush/write chunks per specified
time via flush_interval immediate: flush/write chunks immediately
after events are appended into chunks'
type: string
flush_thread_burst_interval:
description: The sleep interval seconds of threads between flushes
when output plugin flushes waiting chunks next to next
type: string
flush_thread_count:
description: The number of threads of output plugins, which
is used to write chunks in parallel
type: integer
flush_thread_interval:
description: The sleep interval seconds of threads to wait next
flush trial (when no chunks are waiting)
type: string
overflow_action:
description: 'How output plugin behaves when its buffer queue
is full throw_exception: raise exception to show this error
in log block: block processing of input plugin to emit events
into that buffer drop_oldest_chunk: drop/purge oldest chunk
to accept newly incoming chunk'
type: string
path:
description: 'The path where buffer chunks are stored. The ''*''
is replaced with random characters. It''s highly recommended
to leave this default. (default: operator generated)'
type: string
queue_limit_length:
description: The queue length limitation of this buffer plugin
instance
type: integer
queued_chunks_limit_size:
description: Limit the number of queued chunks. If you set smaller
flush_interval, e.g. 1s, there are lots of small queued chunks
in buffer. This is not good with file buffer because it consumes
lots of fd resources when output destination has a problem.
This parameter mitigates such situations.
type: integer
retry_exponential_backoff_base:
description: The base number of exponential backoff for retries
type: string
retry_forever:
description: If true, plugin will ignore retry_timeout and retry_max_times
options and retry flushing forever
type: boolean
retry_max_interval:
description: The maximum interval seconds for exponential backoff
between retries while failing
type: string
retry_max_times:
description: The maximum number of times to retry to flush while
failing
type: integer
retry_randomize:
description: If true, output plugin will retry after randomized
interval not to do burst retries
type: boolean
retry_secondary_threshold:
description: The ratio of retry_timeout to switch to use secondary
while failing (Maximum valid value is 1.0)
type: string
retry_timeout:
description: The maximum seconds to retry to flush while failing,
until plugin discards buffer chunks
type: string
retry_type:
description: 'exponential_backoff: wait seconds will become
large exponentially per failures periodic: output plugin will
retry periodically with fixed intervals (configured via retry_wait)'
type: string
retry_wait:
description: Seconds to wait before next retry to flush, or
constant factor of exponential backoff
type: string
tags:
description: 'When tag is specified as buffer chunk key, output
plugin writes events into chunks separately per tags. (default:
tag,time)'
type: string
timekey:
description: Output plugin will flush chunks per specified time
(enabled when time is specified in chunk keys)
type: string
timekey_use_utc:
description: Output plugin decides to use UTC or not to format
placeholders using timekey
type: boolean
timekey_wait:
description: Output plugin writes chunks after timekey_wait
seconds later after timekey expiration
type: string
timekey_zone:
description: The timezone (-0700 or Asia/Tokyo) string for formatting
timekey placeholders
type: string
total_limit_size:
description: The size limitation of this buffer plugin instance.
Once the total size of stored buffer reached this threshold,
all append operations will fail with error (and data will
be lost)
type: string
type:
description: Fluentd core bundles memory and file plugins. 3rd
party plugins are also available when installed.
type: string
type: object
concurrency:
description: 'Use to set the number of threads pushing data to CloudWatch.
(default: 1)'
type: integer
endpoint:
description: Use this parameter to connect to the local API endpoint
(for testing)
type: string
format:
properties:
type:
description: 'Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value
(default: json)'
enum:
- out_file
- json
- ltsv
- csv
- msgpack
- hash
- single_value
type: string
type: object
http_proxy:
description: Use to set an optional HTTP proxy
type: string
include_time_key:
description: 'Include time key as part of the log entry (default:
UTC)'
type: boolean
json_handler:
description: Name of the library to be used to handle JSON data.
For now, supported libraries are json (default) and yajl
type: string
localtime:
description: Use localtime timezone for include_time_key output
(overrides UTC default)
type: boolean
log_group_aws_tags:
description: Set a hash with keys and values to tag the log group
resource
type: string
log_group_aws_tags_key:
description: Specified field of records as AWS tags for the log
group
type: string
log_group_name:
description: Name of log group to store logs
type: string
log_rejected_request:
description: 'Output rejected_log_events_info request log. (default:
false)'
type: string
log_stream_name:
description: Name of log stream to store logs
type: string
log_stream_name_key:
description: Specified field of records as log stream name
type: string
max_events_per_batch:
description: 'Maximum number of events to send at once (default:
10000)'
type: integer
max_message_length:
description: Maximum length of the message
type: integer
message_keys:
description: Keys to send messages as events
type: string
put_log_events_disable_retry_limit:
description: If true, put_log_events_retry_limit will be ignored
type: boolean
put_log_events_retry_limit:
description: Maximum count of retry (if exceeding this, the events
will be discarded)
type: integer
put_log_events_retry_wait:
description: Time before retrying PutLogEvents (retry interval increases
exponentially like put_log_events_retry_wait * (2 ^ retry_count))
type: string
region:
description: AWS Region
type: string
remove_log_group_aws_tags_key:
description: Remove field specified by log_group_aws_tags_key
type: string
remove_log_group_name_key:
description: Remove field specified by log_group_name_key
type: string
remove_log_stream_name_key:
description: Remove field specified by log_stream_name_key
type: string
remove_retention_in_days:
description: Remove field specified by retention_in_days
type: string
retention_in_days:
description: Use to set the expiry time for log group when created
with auto_create_stream. (default to no expiry)
type: string
retention_in_days_key:
description: Use specified field of records as retention period
type: string
use_tag_as_group:
description: Use tag as a group name
type: boolean
use_tag_as_stream:
description: Use tag as a stream name
type: boolean
required:
- region
type: object
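# Illustrative ClusterOutput using the cloudwatch plugin described above (sketch only;
# names, region, log group and Secret references are assumptions). Per the schema,
# region is the only required field.
#
#   apiVersion: logging.banzaicloud.io/v1beta1
#   kind: ClusterOutput
#   metadata:
#     name: cloudwatch-output
#     namespace: logging
#   spec:
#     cloudwatch:
#       region: us-east-1
#       log_group_name: my-cluster
#       log_stream_name: fluentd
#       auto_create_stream: true
#       aws_key_id:
#         valueFrom:
#           secretKeyRef:
#             name: aws-credentials
#             key: awsAccessKeyId
#       aws_sec_key:
#         valueFrom:
#           secretKeyRef:
#             name: aws-credentials
#             key: awsSecretAccessKey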
elasticsearch:
description: Send your logs to Elasticsearch
properties:
application_name:
description: 'Specify the application name for the rollover index
to be created.(default: default)'
type: string
buffer:
properties:
chunk_full_threshold:
description: The percentage of chunk size threshold for flushing.
output plugin will flush the chunk when actual size reaches
chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in
default)
type: string
chunk_limit_records:
description: The max number of events that each chunks can store
in it
type: integer
chunk_limit_size:
description: 'The max size of each chunks: events will be written
into chunks until the size of chunks become this size'
type: string
compress:
description: If you set this option to gzip, you can get Fluentd
to compress data records before writing to buffer chunks.
type: string
delayed_commit_timeout:
description: The timeout seconds until output plugin decides
that async write operation fails
type: string
disable_chunk_backup:
description: Instead of storing unrecoverable chunks in the
backup directory, just discard them. This option is new in
Fluentd v1.2.6.
type: boolean
flush_at_shutdown:
description: The value to specify to flush/write all buffer
chunks at shutdown, or not
type: boolean
flush_interval:
description: 'Default: 60s'
type: string
flush_mode:
description: 'Default: default (equals to lazy if time is specified
as chunk key, interval otherwise) lazy: flush/write chunks
once per timekey interval: flush/write chunks per specified
time via flush_interval immediate: flush/write chunks immediately
after events are appended into chunks'
type: string
flush_thread_burst_interval:
description: The sleep interval seconds of threads between flushes
when output plugin flushes waiting chunks next to next
type: string
flush_thread_count:
description: The number of threads of output plugins, which
is used to write chunks in parallel
type: integer
flush_thread_interval:
description: The sleep interval seconds of threads to wait next
flush trial (when no chunks are waiting)
type: string
overflow_action:
description: 'How output plugin behaves when its buffer queue
is full throw_exception: raise exception to show this error
in log block: block processing of input plugin to emit events
into that buffer drop_oldest_chunk: drop/purge oldest chunk
to accept newly incoming chunk'
type: string
path:
description: 'The path where buffer chunks are stored. The ''*''
is replaced with random characters. It''s highly recommended
to leave this default. (default: operator generated)'
type: string
queue_limit_length:
description: The queue length limitation of this buffer plugin
instance
type: integer
queued_chunks_limit_size:
description: Limit the number of queued chunks. If you set smaller
flush_interval, e.g. 1s, there are lots of small queued chunks
in buffer. This is not good with file buffer because it consumes
lots of fd resources when output destination has a problem.
This parameter mitigates such situations.
type: integer
retry_exponential_backoff_base:
description: The base number of exponential backoff for retries
type: string
retry_forever:
description: If true, plugin will ignore retry_timeout and retry_max_times
options and retry flushing forever
type: boolean
retry_max_interval:
description: The maximum interval seconds for exponential backoff
between retries while failing
type: string
retry_max_times:
description: The maximum number of times to retry to flush while
failing
type: integer
retry_randomize:
description: If true, output plugin will retry after randomized
interval not to do burst retries
type: boolean
retry_secondary_threshold:
description: The ratio of retry_timeout to switch to use secondary
while failing (Maximum valid value is 1.0)
type: string
retry_timeout:
description: The maximum seconds to retry to flush while failing,
until plugin discards buffer chunks
type: string
retry_type:
description: 'exponential_backoff: wait seconds will become
large exponentially per failures periodic: output plugin will
retry periodically with fixed intervals (configured via retry_wait)'
type: string
retry_wait:
description: Seconds to wait before next retry to flush, or
constant factor of exponential backoff
type: string
tags:
description: 'When tag is specified as buffer chunk key, output
plugin writes events into chunks separately per tags. (default:
tag,time)'
type: string
timekey:
description: Output plugin will flush chunks per specified time
(enabled when time is specified in chunk keys)
type: string
timekey_use_utc:
description: Output plugin decides to use UTC or not to format
placeholders using timekey
type: boolean
timekey_wait:
description: Output plugin writes chunks after timekey_wait
seconds later after timekey expiration
type: string
timekey_zone:
description: The timezone (-0700 or Asia/Tokyo) string for formatting
timekey placeholders
type: string
total_limit_size:
description: The size limitation of this buffer plugin instance.
Once the total size of stored buffer reached this threshold,
all append operations will fail with error (and data will
be lost)
type: string
type:
description: Fluentd core bundles memory and file plugins. 3rd
party plugins are also available when installed.
type: string
type: object
bulk_message_request_threshold:
description: 'Configure bulk_message request splitting threshold
size. Default value is 20MB. (20 * 1024 * 1024) If you specify
this size as negative number, bulk_message request splitting feature
will be disabled. (default: 20MB)'
type: string
content_type:
description: 'With content_type application/x-ndjson, elasticsearch
plugin adds application/x-ndjson as Content-Type in payload. (default:
application/json)'
type: string
custom_headers:
description: 'This parameter adds additional headers to request.
Example: {"token":"secret"} (default: {})'
type: string
customize_template:
description: Specify the string and its value to be replaced in
form of hash. Can contain multiple key value pair that would be
replaced in the specified template_file. This setting only creates
template and to add rollover index please check the rollover_index
configuration.
type: string
default_elasticsearch_version:
description: This parameter changes that ES plugin assumes default
Elasticsearch version. The default value is 5.
type: string
deflector_alias:
description: Specify the deflector alias which would be assigned
to the rollover index created. This is useful in case of using
the Elasticsearch rollover API
type: string
exception_backup:
description: 'Indicates whether to backup chunk when ignore exception
occurs. (default: true)'
type: boolean
fail_on_putting_template_retry_exceed:
description: 'Indicates whether to fail when max_retry_putting_template
                    is exceeded. If you have multiple output plugins, you can use
                    this property to avoid failing on fluentd startup. (default: true)'
type: boolean
flatten_hashes:
description: 'Elasticsearch will complain if you send object and
concrete values to the same field. For example, you might have
logs that look this, from different places: {"people" => 100}
{"people" => {"some" => "thing"}} The second log line will be
rejected by the Elasticsearch parser because objects and concrete
values can''t live in the same field. To combat this, you can
enable hash flattening.'
type: boolean
flatten_hashes_separator:
description: Flatten separator
type: string
host:
description: You can specify Elasticsearch host by this parameter.
(default:localhost)
type: string
hosts:
description: You can specify multiple Elasticsearch hosts with separator
",". If you specify hosts option, host and port options are ignored.
type: string
http_backend:
description: 'With http_backend typhoeus, elasticsearch plugin uses
typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
(default: excon)'
type: string
id_key:
description: https://github.com/uken/fluent-plugin-elasticsearch#id_key
type: string
ignore_exceptions:
description: A list of exception that will be ignored - when the
exception occurs the chunk will be discarded and the buffer retry
mechanism won't be called. It is possible also to specify classes
at higher level in the hierarchy. For example `ignore_exceptions
["Elasticsearch::Transport::Transport::ServerError"]` will match
all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest,
Elasticsearch::Transport::Transport::Errors::ServiceUnavailable,
etc.
type: string
include_index_in_url:
description: With this option set to true, Fluentd manifests the
index name in the request URL (rather than in the request body).
You can use this option to enforce an URL-based access control.
type: boolean
include_tag_key:
description: 'This will add the Fluentd tag in the JSON record.(default:
false)'
type: boolean
include_timestamp:
description: Adds a @timestamp field to the log, following all settings
logstash_format does, except without the restrictions on index_name.
This allows one to log to an alias in Elasticsearch and utilize
the rollover API.
type: boolean
index_date_pattern:
description: 'Specify this to override the index date pattern for
creating a rollover index.(default: now/d)'
type: string
index_prefix:
description: Specify the index prefix for the rollover index to
be created.
type: string
log_es_400_reason:
description: 'By default, the error logger won''t record the reason
for a 400 error from the Elasticsearch API unless you set log_level
to debug. However, this results in a lot of log spam, which isn''t
desirable if all you want is the 400 error reasons. You can set
this true to capture the 400 error reasons without all the other
debug logs. (default: false)'
type: boolean
logstash_dateformat:
description: 'Set the Logstash date format.(default: %Y.%m.%d)'
type: string
logstash_format:
description: 'Enable Logstash log format.(default: false)'
type: boolean
logstash_prefix:
description: 'Set the Logstash prefix.(default: true)'
type: string
logstash_prefix_separator:
description: 'Set the Logstash prefix separator.(default: -)'
type: string
max_retry_get_es_version:
description: 'You can specify times of retry obtaining Elasticsearch
version.(default: 15)'
type: string
max_retry_putting_template:
description: 'You can specify times of retry putting template.(default:
10)'
type: string
password:
description: Password for HTTP Basic authentication.
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
description: Secret key for the value
type: string
name:
description: Name of the kubernetes secret
type: string
required:
- key
- name
type: object
type: object
type: object
path:
description: Path for HTTP Basic authentication.
type: string
pipeline:
description: This param is to set a pipeline id of your elasticsearch
to be added into the request, you can configure ingest node.
type: string
port:
description: 'You can specify Elasticsearch port by this parameter.(default:
9200)'
type: integer
prefer_oj_serializer:
description: 'With default behavior, Elasticsearch client uses Yajl
as JSON encoder/decoder. Oj is the alternative high performance
                    JSON encoder/decoder. When this parameter is set to true, Elasticsearch
                    client uses Oj as JSON encoder/decoder. (default: false)'
type: boolean
reconnect_on_error:
description: 'Indicates that the plugin should reset connection
on any error (reconnect on next send). By default it will reconnect
only on "host unreachable exceptions". We recommended to set this
true in the presence of elasticsearch shield.(default: false)'
type: boolean
reload_connections:
description: 'You can tune how the elasticsearch-transport host
reloading feature works.(default: true)'
type: boolean
reload_on_failure:
description: 'Indicates that the elasticsearch-transport will try
to reload the nodes addresses if there is a failure while making
the request, this can be useful to quickly remove a dead node
from the list of addresses.(default: false)'
type: boolean
remove_keys_on_update:
description: Remove keys on update will not update the configured
keys in elasticsearch when a record is being updated. This setting
only has any effect if the write operation is update or upsert.
type: string
remove_keys_on_update_key:
description: This setting allows remove_keys_on_update to be configured
with a key in each record, in much the same way as target_index_key
works.
type: string
request_timeout:
description: 'You can specify HTTP request timeout.(default: 5s)'
type: string
resurrect_after: